-rw-r--r--Documentation/00-INDEX6
-rw-r--r--Documentation/DocBook/dvb/dvbproperty.xml5
-rw-r--r--Documentation/DocBook/media-entities.tmpl7
-rw-r--r--Documentation/DocBook/mtdnand.tmpl3
-rw-r--r--Documentation/DocBook/v4l/media-controller.xml6
-rw-r--r--Documentation/DocBook/v4l/pixfmt.xml1
-rw-r--r--Documentation/DocBook/v4l/subdev-formats.xml10
-rw-r--r--Documentation/accounting/getdelays.c38
-rw-r--r--Documentation/arm/Booting33
-rw-r--r--Documentation/arm/Samsung/Overview.txt2
-rw-r--r--Documentation/atomic_ops.txt2
-rw-r--r--Documentation/cgroups/cgroups.txt41
-rw-r--r--Documentation/devicetree/booting-without-of.txt48
-rw-r--r--Documentation/filesystems/configfs/configfs_example_explicit.c6
-rw-r--r--Documentation/filesystems/configfs/configfs_example_macros.c6
-rw-r--r--Documentation/filesystems/nfs/idmapper.txt4
-rw-r--r--Documentation/networking/dns_resolver.txt4
-rw-r--r--Documentation/power/regulator/machine.txt4
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas14
-rw-r--r--Documentation/security/00-INDEX18
-rw-r--r--Documentation/security/SELinux.txt (renamed from Documentation/SELinux.txt)0
-rw-r--r--Documentation/security/Smack.txt (renamed from Documentation/Smack.txt)0
-rw-r--r--Documentation/security/apparmor.txt (renamed from Documentation/apparmor.txt)0
-rw-r--r--Documentation/security/credentials.txt (renamed from Documentation/credentials.txt)2
-rw-r--r--Documentation/security/keys-request-key.txt (renamed from Documentation/keys-request-key.txt)4
-rw-r--r--Documentation/security/keys-trusted-encrypted.txt (renamed from Documentation/keys-trusted-encrypted.txt)0
-rw-r--r--Documentation/security/keys.txt (renamed from Documentation/keys.txt)4
-rw-r--r--Documentation/security/tomoyo.txt (renamed from Documentation/tomoyo.txt)0
-rw-r--r--Documentation/sysctl/kernel.txt3
-rw-r--r--MAINTAINERS31
-rw-r--r--arch/alpha/Kconfig4
-rw-r--r--arch/alpha/include/asm/unistd.h3
-rw-r--r--arch/alpha/kernel/systbls.S1
-rw-r--r--arch/arm/Kconfig29
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/common/Kconfig2
-rw-r--r--arch/arm/configs/at572d940hfek_defconfig358
-rw-r--r--arch/arm/configs/at91sam9261_defconfig (renamed from arch/arm/configs/at91sam9261ek_defconfig)85
-rw-r--r--arch/arm/configs/at91sam9263_defconfig (renamed from arch/arm/configs/at91sam9263ek_defconfig)84
-rw-r--r--arch/arm/configs/exynos4_defconfig2
-rw-r--r--arch/arm/configs/neocore926_defconfig104
-rw-r--r--arch/arm/configs/s5p6442_defconfig65
-rw-r--r--arch/arm/configs/usb-a9263_defconfig106
-rw-r--r--arch/arm/include/asm/bitops.h46
-rw-r--r--arch/arm/include/asm/fiq.h23
-rw-r--r--arch/arm/include/asm/mach/arch.h9
-rw-r--r--arch/arm/include/asm/page.h2
-rw-r--r--arch/arm/include/asm/prom.h37
-rw-r--r--arch/arm/include/asm/setup.h4
-rw-r--r--arch/arm/include/asm/smp.h1
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/kernel/Makefile3
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/devtree.c145
-rw-r--r--arch/arm/kernel/fiq.c45
-rw-r--r--arch/arm/kernel/fiqasm.S49
-rw-r--r--arch/arm/kernel/head-common.S24
-rw-r--r--arch/arm/kernel/head.S15
-rw-r--r--arch/arm/kernel/setup.c90
-rw-r--r--arch/arm/kernel/smp.c1
-rw-r--r--arch/arm/lib/lib1funcs.S25
-rw-r--r--arch/arm/mach-at91/Kconfig40
-rw-r--r--arch/arm/mach-at91/Makefile4
-rw-r--r--arch/arm/mach-at91/at572d940hf.c377
-rw-r--r--arch/arm/mach-at91/at572d940hf_devices.c970
-rw-r--r--arch/arm/mach-at91/at91cap9.c41
-rw-r--r--arch/arm/mach-at91/at91cap9_devices.c24
-rw-r--r--arch/arm/mach-at91/at91rm9200.c53
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c24
-rw-r--r--arch/arm/mach-at91/at91sam9260.c48
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c26
-rw-r--r--arch/arm/mach-at91/at91sam9261.c41
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c21
-rw-r--r--arch/arm/mach-at91/at91sam9263.c39
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c20
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c64
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c27
-rw-r--r--arch/arm/mach-at91/at91sam9rl.c40
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c23
-rw-r--r--arch/arm/mach-at91/at91x40.c5
-rw-r--r--arch/arm/mach-at91/board-1arm.c12
-rw-r--r--arch/arm/mach-at91/board-afeb-9260v1.c6
-rw-r--r--arch/arm/mach-at91/board-at572d940hf_ek.c326
-rw-r--r--arch/arm/mach-at91/board-cam60.c6
-rw-r--r--arch/arm/mach-at91/board-cap9adk.c13
-rw-r--r--arch/arm/mach-at91/board-carmeva.c8
-rw-r--r--arch/arm/mach-at91/board-cpu9krea.c6
-rw-r--r--arch/arm/mach-at91/board-cpuat91.c12
-rw-r--r--arch/arm/mach-at91/board-csb337.c8
-rw-r--r--arch/arm/mach-at91/board-csb637.c8
-rw-r--r--arch/arm/mach-at91/board-eb01.c4
-rw-r--r--arch/arm/mach-at91/board-eb9200.c8
-rw-r--r--arch/arm/mach-at91/board-ecbat91.c12
-rw-r--r--arch/arm/mach-at91/board-eco920.c32
-rw-r--r--arch/arm/mach-at91/board-flexibity.c6
-rw-r--r--arch/arm/mach-at91/board-foxg20.c6
-rw-r--r--arch/arm/mach-at91/board-gsia18s.c8
-rw-r--r--arch/arm/mach-at91/board-kafa.c12
-rw-r--r--arch/arm/mach-at91/board-kb9202.c13
-rw-r--r--arch/arm/mach-at91/board-neocore926.c6
-rw-r--r--arch/arm/mach-at91/board-pcontrol-g20.c8
-rw-r--r--arch/arm/mach-at91/board-picotux200.c8
-rw-r--r--arch/arm/mach-at91/board-qil-a9260.c6
-rw-r--r--arch/arm/mach-at91/board-rm9200dk.c8
-rw-r--r--arch/arm/mach-at91/board-rm9200ek.c8
-rw-r--r--arch/arm/mach-at91/board-sam9-l9260.c6
-rw-r--r--arch/arm/mach-at91/board-sam9260ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9263ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c17
-rw-r--r--arch/arm/mach-at91/board-sam9m10g45ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c6
-rw-r--r--arch/arm/mach-at91/board-snapper9260.c6
-rw-r--r--arch/arm/mach-at91/board-stamp9g20.c18
-rw-r--r--arch/arm/mach-at91/board-usb-a9260.c6
-rw-r--r--arch/arm/mach-at91/board-usb-a9263.c6
-rw-r--r--arch/arm/mach-at91/board-yl-9200.c12
-rw-r--r--arch/arm/mach-at91/clock.c69
-rw-r--r--arch/arm/mach-at91/clock.h20
-rw-r--r--arch/arm/mach-at91/generic.h30
-rw-r--r--arch/arm/mach-at91/include/mach/at572d940hf.h123
-rw-r--r--arch/arm/mach-at91/include/mach/at572d940hf_matrix.h123
-rw-r--r--arch/arm/mach-at91/include/mach/at91cap9.h4
-rw-r--r--arch/arm/mach-at91/include/mach/at91rm9200.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9260.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9261.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9263.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9g45.h4
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9rl.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91x40.h2
-rw-r--r--arch/arm/mach-at91/include/mach/board.h6
-rw-r--r--arch/arm/mach-at91/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-at91/include/mach/cpu.h15
-rw-r--r--arch/arm/mach-at91/include/mach/hardware.h15
-rw-r--r--arch/arm/mach-at91/include/mach/memory.h2
-rw-r--r--arch/arm/mach-at91/include/mach/stamp9g20.h2
-rw-r--r--arch/arm/mach-at91/include/mach/system_rev.h25
-rw-r--r--arch/arm/mach-at91/include/mach/timex.h5
-rw-r--r--arch/arm/mach-davinci/da850.c2
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c16
-rw-r--r--arch/arm/mach-davinci/devices.c3
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h4
-rw-r--r--arch/arm/mach-davinci/include/mach/hardware.h3
-rw-r--r--arch/arm/mach-exynos4/Kconfig2
-rw-r--r--arch/arm/mach-exynos4/Makefile1
-rw-r--r--arch/arm/mach-exynos4/cpuidle.c86
-rw-r--r--arch/arm/mach-exynos4/mach-nuri.c89
-rw-r--r--arch/arm/mach-gemini/board-wbd111.c7
-rw-r--r--arch/arm/mach-gemini/board-wbd222.c7
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c4
-rw-r--r--arch/arm/mach-netx/fb.c1
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c27
-rw-r--r--arch/arm/mach-pxa/Kconfig1
-rw-r--r--arch/arm/mach-s3c2410/mach-amlm5900.c5
-rw-r--r--arch/arm/mach-s3c2410/mach-tct_hammer.c6
-rw-r--r--arch/arm/mach-s3c64xx/dev-spi.c20
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h48
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h60
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h53
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h49
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h44
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h71
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h42
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h74
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h40
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h36
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h54
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h70
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h69
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h46
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/pm.c34
-rw-r--r--arch/arm/mach-s3c64xx/setup-i2c0.c7
-rw-r--r--arch/arm/mach-s3c64xx/setup-i2c1.c7
-rw-r--r--arch/arm/mach-s3c64xx/sleep.S8
-rw-r--r--arch/arm/mach-s5p6442/Kconfig25
-rw-r--r--arch/arm/mach-s5p6442/Makefile24
-rw-r--r--arch/arm/mach-s5p6442/Makefile.boot2
-rw-r--r--arch/arm/mach-s5p6442/clock.c420
-rw-r--r--arch/arm/mach-s5p6442/cpu.c143
-rw-r--r--arch/arm/mach-s5p6442/dev-audio.c217
-rw-r--r--arch/arm/mach-s5p6442/dev-spi.c121
-rw-r--r--arch/arm/mach-s5p6442/dma.c105
-rw-r--r--arch/arm/mach-s5p6442/include/mach/debug-macro.S35
-rw-r--r--arch/arm/mach-s5p6442/include/mach/entry-macro.S48
-rw-r--r--arch/arm/mach-s5p6442/include/mach/gpio.h123
-rw-r--r--arch/arm/mach-s5p6442/include/mach/hardware.h18
-rw-r--r--arch/arm/mach-s5p6442/include/mach/io.h17
-rw-r--r--arch/arm/mach-s5p6442/include/mach/irqs.h87
-rw-r--r--arch/arm/mach-s5p6442/include/mach/map.h76
-rw-r--r--arch/arm/mach-s5p6442/include/mach/memory.h19
-rw-r--r--arch/arm/mach-s5p6442/include/mach/pwm-clock.h70
-rw-r--r--arch/arm/mach-s5p6442/include/mach/regs-clock.h104
-rw-r--r--arch/arm/mach-s5p6442/include/mach/regs-irq.h19
-rw-r--r--arch/arm/mach-s5p6442/include/mach/spi-clocks.h17
-rw-r--r--arch/arm/mach-s5p6442/include/mach/system.h23
-rw-r--r--arch/arm/mach-s5p6442/include/mach/tick.h26
-rw-r--r--arch/arm/mach-s5p6442/include/mach/timex.h24
-rw-r--r--arch/arm/mach-s5p6442/include/mach/uncompress.h24
-rw-r--r--arch/arm/mach-s5p6442/include/mach/vmalloc.h17
-rw-r--r--arch/arm/mach-s5p6442/init.c44
-rw-r--r--arch/arm/mach-s5p6442/mach-smdk6442.c102
-rw-r--r--arch/arm/mach-s5p6442/setup-i2c0.c28
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c16
-rw-r--r--arch/arm/mach-ux500/devices-common.h10
-rw-r--r--arch/arm/mach-ux500/devices-db5500.h28
-rw-r--r--arch/arm/mach-ux500/devices-db8500.h34
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h3
-rw-r--r--arch/arm/mm/cache-v6.S1
-rw-r--r--arch/arm/mm/cache-v7.S2
-rw-r--r--arch/arm/mm/context.c17
-rw-r--r--arch/arm/mm/init.c15
-rw-r--r--arch/arm/mm/mm.h7
-rw-r--r--arch/arm/mm/mmu.c9
-rw-r--r--arch/arm/mm/proc-v6.S4
-rw-r--r--arch/arm/mm/proc-v7.S14
-rw-r--r--arch/arm/plat-s5p/Kconfig2
-rw-r--r--arch/arm/plat-s5p/cpu.c10
-rw-r--r--arch/arm/plat-s5p/include/plat/s5p6442.h33
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/debug-macro.S2
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h6
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c64xx-spi.h1
-rw-r--r--arch/avr32/include/asm/bitops.h15
-rw-r--r--arch/avr32/include/asm/unistd.h3
-rw-r--r--arch/avr32/kernel/syscall_table.S1
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c4
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h1
-rw-r--r--arch/blackfin/Kconfig3
-rw-r--r--arch/blackfin/include/asm/kgdb.h1
-rw-r--r--arch/blackfin/include/asm/ptrace.h5
-rw-r--r--arch/blackfin/include/asm/unistd.h3
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c4
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c4
-rw-r--r--arch/blackfin/mach-common/entry.S1
-rw-r--r--arch/cris/Kconfig5
-rw-r--r--arch/cris/arch-v10/drivers/axisflashmap.c10
-rw-r--r--arch/cris/arch-v10/kernel/entry.S1
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig1
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c7
-rw-r--r--arch/cris/arch-v32/kernel/entry.S1
-rw-r--r--arch/cris/include/asm/unistd.h3
-rw-r--r--arch/frv/Kconfig8
-rw-r--r--arch/frv/include/asm/suspend.h20
-rw-r--r--arch/frv/include/asm/unistd.h3
-rw-r--r--arch/frv/kernel/entry.S1
-rw-r--r--arch/h8300/Kconfig8
-rw-r--r--arch/h8300/include/asm/unistd.h3
-rw-r--r--arch/h8300/kernel/syscalls.S1
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/include/asm/unistd.h3
-rw-r--r--arch/ia64/kernel/entry.S1
-rw-r--r--arch/m32r/Kconfig8
-rw-r--r--arch/m32r/include/asm/smp.h4
-rw-r--r--arch/m32r/include/asm/unistd.h3
-rw-r--r--arch/m32r/kernel/smp.c64
-rw-r--r--arch/m32r/kernel/smpboot.c48
-rw-r--r--arch/m32r/kernel/syscall_table.S1
-rw-r--r--arch/m68k/Kconfig.nommu4
-rw-r--r--arch/m68k/include/asm/bitops_mm.h8
-rw-r--r--arch/m68k/include/asm/bitops_no.h4
-rw-r--r--arch/m68k/include/asm/unistd.h3
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/microblaze/Kconfig6
-rw-r--r--arch/microblaze/include/asm/unistd.h3
-rw-r--r--arch/microblaze/kernel/prom.c2
-rw-r--r--arch/microblaze/kernel/syscall_table.S1
-rw-r--r--arch/mips/Kconfig8
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c11
-rw-r--r--arch/mips/configs/bcm47xx_defconfig1
-rw-r--r--arch/mips/include/asm/prom.h3
-rw-r--r--arch/mips/include/asm/suspend.h2
-rw-r--r--arch/mips/include/asm/unistd.h15
-rw-r--r--arch/mips/kernel/prom.c3
-rw-r--r--arch/mips/kernel/scall32-o32.S1
-rw-r--r--arch/mips/kernel/scall64-64.S1
-rw-r--r--arch/mips/kernel/scall64-n32.S1
-rw-r--r--arch/mips/kernel/scall64-o32.S1
-rw-r--r--arch/mips/txx9/generic/setup.c3
-rw-r--r--arch/mn10300/Kconfig3
-rw-r--r--arch/mn10300/configs/asb2364_defconfig1
-rw-r--r--arch/mn10300/include/asm/unistd.h3
-rw-r--r--arch/mn10300/kernel/entry.S1
-rw-r--r--arch/parisc/Kconfig8
-rw-r--r--arch/parisc/include/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/powerpc/Kconfig8
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/include/asm/fsl_lbc.h2
-rw-r--r--arch/powerpc/include/asm/rio.h5
-rw-r--r--arch/powerpc/include/asm/suspend.h6
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h3
-rw-r--r--arch/powerpc/kernel/prom.c2
-rw-r--r--arch/powerpc/kernel/swsusp.c1
-rw-r--r--arch/powerpc/kernel/traps.c13
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c9
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c100
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/appldata/appldata_mem.c2
-rw-r--r--arch/s390/include/asm/bitops.h45
-rw-r--r--arch/s390/include/asm/delay.h8
-rw-r--r--arch/s390/include/asm/irq.h8
-rw-r--r--arch/s390/include/asm/s390_ext.h17
-rw-r--r--arch/s390/include/asm/suspend.h10
-rw-r--r--arch/s390/include/asm/topology.h4
-rw-r--r--arch/s390/include/asm/uaccess.h11
-rw-r--r--arch/s390/include/asm/unistd.h3
-rw-r--r--arch/s390/kernel/Makefile8
-rw-r--r--arch/s390/kernel/compat_wrapper.S6
-rw-r--r--arch/s390/kernel/dis.c2
-rw-r--r--arch/s390/kernel/irq.c137
-rw-r--r--arch/s390/kernel/s390_ext.c108
-rw-r--r--arch/s390/kernel/smp.c1
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/time.c1
-rw-r--r--arch/s390/kernel/topology.c1
-rw-r--r--arch/s390/kernel/traps.c1
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/lib/delay.c15
-rw-r--r--arch/s390/mm/fault.c62
-rw-r--r--arch/s390/mm/init.c2
-rw-r--r--arch/s390/oprofile/hwsampler.c4
-rw-r--r--arch/score/Kconfig3
-rw-r--r--arch/sh/Kconfig6
-rw-r--r--arch/sh/configs/apsh4ad0a_defconfig1
-rw-r--r--arch/sh/configs/sdk7786_defconfig1
-rw-r--r--arch/sh/configs/se7206_defconfig1
-rw-r--r--arch/sh/configs/shx3_defconfig1
-rw-r--r--arch/sh/configs/urquell_defconfig1
-rw-r--r--arch/sh/include/asm/kgdb.h1
-rw-r--r--arch/sh/include/asm/ptrace.h6
-rw-r--r--arch/sh/include/asm/suspend.h1
-rw-r--r--arch/sh/include/asm/unistd_32.h3
-rw-r--r--arch/sh/include/asm/unistd_64.h3
-rw-r--r--arch/sh/kernel/syscalls_32.S1
-rw-r--r--arch/sh/kernel/syscalls_64.S1
-rw-r--r--arch/sparc/Kconfig8
-rw-r--r--arch/sparc/include/asm/unistd.h3
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/tile/Kconfig1
-rw-r--r--arch/um/Kconfig.x861
-rw-r--r--arch/unicore32/include/asm/suspend.h1
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/configs/i386_defconfig1
-rw-r--r--arch/x86/configs/x86_64_defconfig1
-rw-r--r--arch/x86/ia32/ia32entry.S1
-rw-r--r--arch/x86/include/asm/kgdb.h1
-rw-r--r--arch/x86/include/asm/ptrace.h18
-rw-r--r--arch/x86/include/asm/suspend_32.h2
-rw-r--r--arch/x86/include/asm/suspend_64.h5
-rw-r--r--arch/x86/include/asm/unistd_32.h3
-rw-r--r--arch/x86/include/asm/unistd_64.h2
-rw-r--r--arch/x86/kernel/syscall_table_32.S1
-rw-r--r--arch/x86/xen/mmu.c284
-rw-r--r--arch/x86/xen/mmu.h37
-rw-r--r--arch/xtensa/Kconfig6
-rw-r--r--arch/xtensa/include/asm/unistd.h4
-rw-r--r--block/blk-cgroup.c18
-rw-r--r--block/blk-core.c5
-rw-r--r--block/genhd.c2
-rw-r--r--drivers/amba/bus.c5
-rw-r--r--drivers/bcma/host_pci.c1
-rw-r--r--drivers/block/brd.c42
-rw-r--r--drivers/block/loop.c17
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c138
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c88
-rw-r--r--drivers/char/mspec.c5
-rw-r--r--drivers/char/ppdev.c1
-rw-r--r--drivers/edac/amd76x_edac.c2
-rw-r--r--drivers/edac/amd8111_edac.c2
-rw-r--r--drivers/edac/amd8131_edac.c2
-rw-r--r--drivers/edac/cpc925_edac.c2
-rw-r--r--drivers/edac/e752x_edac.c2
-rw-r--r--drivers/edac/e7xxx_edac.c2
-rw-r--r--drivers/edac/edac_core.h12
-rw-r--r--drivers/edac/edac_device.c24
-rw-r--r--drivers/edac/edac_mc.c16
-rw-r--r--drivers/edac/edac_module.c2
-rw-r--r--drivers/edac/edac_pci.c21
-rw-r--r--drivers/edac/i5000_edac.c2
-rw-r--r--drivers/edac/i5400_edac.c2
-rw-r--r--drivers/edac/i7300_edac.c2
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/i82860_edac.c2
-rw-r--r--drivers/edac/i82875p_edac.c2
-rw-r--r--drivers/edac/i82975x_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.h2
-rw-r--r--drivers/edac/mv64x60_edac.h2
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/edac/r82600_edac.c2
-rw-r--r--drivers/gpio/Kconfig7
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/tps65910-gpio.c100
-rw-r--r--drivers/hwmon/coretemp.c16
-rw-r--r--drivers/hwmon/pmbus_core.c1
-rw-r--r--drivers/isdn/hardware/eicon/divasfunc.c5
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c272
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c107
-rw-r--r--drivers/media/dvb/frontends/stb0899_algo.c2
-rw-r--r--drivers/media/dvb/frontends/tda8261.c1
-rw-r--r--drivers/media/radio/radio-maxiradio.c3
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/rc/Kconfig12
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/fintek-cir.c684
-rw-r--r--drivers/media/rc/fintek-cir.h243
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c134
-rw-r--r--drivers/media/video/Kconfig6
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c2
-rw-r--r--drivers/media/video/gspca/kinect.c2
-rw-r--r--drivers/media/video/m5mols/Kconfig5
-rw-r--r--drivers/media/video/m5mols/Makefile3
-rw-r--r--drivers/media/video/m5mols/m5mols.h296
-rw-r--r--drivers/media/video/m5mols/m5mols_capture.c191
-rw-r--r--drivers/media/video/m5mols/m5mols_controls.c299
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c1004
-rw-r--r--drivers/media/video/m5mols/m5mols_reg.h399
-rw-r--r--drivers/media/video/uvc/Makefile3
-rw-r--r--drivers/media/video/uvc/uvc_driver.c66
-rw-r--r--drivers/media/video/uvc/uvc_entity.c118
-rw-r--r--drivers/media/video/uvc/uvcvideo.h20
-rw-r--r--drivers/mfd/Kconfig9
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/tps65910-irq.c218
-rw-r--r--drivers/mfd/tps65910.c229
-rw-r--r--drivers/mfd/tps65911-comparator.c188
-rw-r--r--drivers/misc/kgdbts.c29
-rw-r--r--drivers/mmc/host/mmci.c25
-rw-r--r--drivers/mtd/Kconfig18
-rw-r--r--drivers/mtd/Makefile3
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c1
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/doc2000.c4
-rw-r--r--drivers/mtd/devices/doc2001.c4
-rw-r--r--drivers/mtd/devices/doc2001plus.c4
-rw-r--r--drivers/mtd/devices/lart.c9
-rw-r--r--drivers/mtd/devices/m25p80.c109
-rw-r--r--drivers/mtd/devices/ms02-nv.c4
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c45
-rw-r--r--drivers/mtd/devices/mtdram.c5
-rw-r--r--drivers/mtd/devices/phram.c4
-rw-r--r--drivers/mtd/devices/pmc551.c6
-rw-r--r--drivers/mtd/devices/slram.c4
-rw-r--r--drivers/mtd/devices/sst25l.c68
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c8
-rw-r--r--drivers/mtd/maps/Kconfig23
-rw-r--r--drivers/mtd/maps/amd76xrom.c4
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c4
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c6
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c21
-rw-r--r--drivers/mtd/maps/cdb89712.c12
-rw-r--r--drivers/mtd/maps/ceiva.c6
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c4
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/dbox2-flash.c4
-rw-r--r--drivers/mtd/maps/dc21285.c20
-rw-r--r--drivers/mtd/maps/dilnetpc.c9
-rw-r--r--drivers/mtd/maps/dmv182.c4
-rw-r--r--drivers/mtd/maps/edb7312.c26
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/fortunet.c7
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c27
-rw-r--r--drivers/mtd/maps/h720x-flash.c6
-rw-r--r--drivers/mtd/maps/ichxrom.c4
-rw-r--r--drivers/mtd/maps/impa7.c22
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c19
-rw-r--r--drivers/mtd/maps/ixp2000.c4
-rw-r--r--drivers/mtd/maps/ixp4xx.c16
-rw-r--r--drivers/mtd/maps/l440gx.c4
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c45
-rw-r--r--drivers/mtd/maps/mbx860.c6
-rw-r--r--drivers/mtd/maps/netsc520.c4
-rw-r--r--drivers/mtd/maps/nettel.c12
-rw-r--r--drivers/mtd/maps/octagon-5066.c4
-rw-r--r--drivers/mtd/maps/pci.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c4
-rw-r--r--drivers/mtd/maps/physmap.c34
-rw-r--r--drivers/mtd/maps/physmap_of.c30
-rw-r--r--drivers/mtd/maps/plat-ram.c24
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c6
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c18
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c24
-rw-r--r--drivers/mtd/maps/rpxlite.c4
-rw-r--r--drivers/mtd/maps/sa1100-flash.c21
-rw-r--r--drivers/mtd/maps/sbc_gxx.c4
-rw-r--r--drivers/mtd/maps/sc520cdp.c8
-rw-r--r--drivers/mtd/maps/scb2_flash.c6
-rw-r--r--drivers/mtd/maps/scx200_docflash.c16
-rw-r--r--drivers/mtd/maps/solutionengine.c12
-rw-r--r--drivers/mtd/maps/sun_uflash.c4
-rw-r--r--drivers/mtd/maps/tqm8xxl.c20
-rw-r--r--drivers/mtd/maps/ts5500_flash.c4
-rw-r--r--drivers/mtd/maps/tsunami_flash.c4
-rw-r--r--drivers/mtd/maps/uclinux.c12
-rw-r--r--drivers/mtd/maps/vmax301.c4
-rw-r--r--drivers/mtd/maps/vmu-flash.c4
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c15
-rw-r--r--drivers/mtd/mtd_blkdevs.c24
-rw-r--r--drivers/mtd/mtdchar.c55
-rw-r--r--drivers/mtd/mtdconcat.c4
-rw-r--r--drivers/mtd/mtdcore.c167
-rw-r--r--drivers/mtd/mtdcore.h6
-rw-r--r--drivers/mtd/mtdpart.c9
-rw-r--r--drivers/mtd/mtdswap.c8
-rw-r--r--drivers/mtd/nand/Kconfig5
-rw-r--r--drivers/mtd/nand/alauda.c4
-rw-r--r--drivers/mtd/nand/ams-delta.c4
-rw-r--r--drivers/mtd/nand/atmel_nand.c13
-rw-r--r--drivers/mtd/nand/au1550nd.c3
-rw-r--r--drivers/mtd/nand/autcpu12.c16
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c4
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c7
-rw-r--r--drivers/mtd/nand/cafe_nand.c11
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/cs553x_nand.c19
-rw-r--r--drivers/mtd/nand/davinci_nand.c51
-rw-r--r--drivers/mtd/nand/denali.c247
-rw-r--r--drivers/mtd/nand/denali.h373
-rw-r--r--drivers/mtd/nand/diskonchip.c18
-rw-r--r--drivers/mtd/nand/edb7312.c9
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c12
-rw-r--r--drivers/mtd/nand/fsl_upm.c12
-rw-r--r--drivers/mtd/nand/fsmc_nand.c25
-rw-r--r--drivers/mtd/nand/gpio.c4
-rw-r--r--drivers/mtd/nand/h1910.c5
-rw-r--r--drivers/mtd/nand/jz4740_nand.c10
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c12
-rw-r--r--drivers/mtd/nand/mxc_nand.c64
-rw-r--r--drivers/mtd/nand/nand_base.c18
-rw-r--r--drivers/mtd/nand/nand_bbt.c27
-rw-r--r--drivers/mtd/nand/nandsim.c4
-rw-r--r--drivers/mtd/nand/ndfc.c65
-rw-r--r--drivers/mtd/nand/nomadik_nand.c7
-rw-r--r--drivers/mtd/nand/nuc900_nand.c4
-rw-r--r--drivers/mtd/nand/omap2.c32
-rw-r--r--drivers/mtd/nand/orion_nand.c14
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/plat_nand.c12
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c15
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c13
-rw-r--r--drivers/mtd/nand/rtc_from4.c3
-rw-r--r--drivers/mtd/nand/s3c2410.c75
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/sharpsl.c12
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c16
-rw-r--r--drivers/mtd/nand/spia.c2
-rw-r--r--drivers/mtd/nand/tmio_nand.c10
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c14
-rw-r--r--drivers/mtd/onenand/Kconfig1
-rw-r--r--drivers/mtd/onenand/generic.c16
-rw-r--r--drivers/mtd/onenand/omap2.c10
-rw-r--r--drivers/mtd/onenand/onenand_base.c54
-rw-r--r--drivers/mtd/onenand/onenand_sim.c3
-rw-r--r--drivers/mtd/onenand/samsung.c12
-rw-r--r--drivers/mtd/ubi/gluebi.c6
-rw-r--r--drivers/net/bonding/bond_main.c34
-rw-r--r--drivers/net/davinci_emac.c22
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/baycom_par.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/sfc/mtd.c6
-rw-r--r--drivers/net/wan/pc300_drv.c3
-rw-r--r--drivers/of/fdt.c8
-rw-r--r--drivers/parport/parport_ip32.c1
-rw-r--r--drivers/power/Kconfig16
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/bq27x00_battery.c11
-rw-r--r--drivers/power/ds2760_battery.c6
-rw-r--r--drivers/power/ds2780_battery.c853
-rw-r--r--drivers/power/gpio-charger.c15
-rw-r--r--drivers/power/isp1704_charger.c22
-rw-r--r--drivers/power/max8903_charger.c391
-rw-r--r--drivers/power/test_power.c276
-rw-r--r--drivers/power/z2_battery.c20
-rw-r--r--drivers/regulator/Kconfig6
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/core.c93
-rw-r--r--drivers/regulator/max8997.c13
-rw-r--r--drivers/regulator/max8998.c22
-rw-r--r--drivers/regulator/mc13892-regulator.c18
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c2
-rw-r--r--drivers/regulator/tps6105x-regulator.c1
-rw-r--r--drivers/regulator/tps65023-regulator.c3
-rw-r--r--drivers/regulator/tps6507x-regulator.c3
-rw-r--r--drivers/regulator/tps65910-regulator.c993
-rw-r--r--drivers/regulator/twl-regulator.c564
-rw-r--r--drivers/regulator/wm831x-dcdc.c2
-rw-r--r--drivers/regulator/wm8400-regulator.c12
-rw-r--r--drivers/rtc/Kconfig42
-rw-r--r--drivers/rtc/Makefile5
-rw-r--r--drivers/rtc/rtc-em3027.c161
-rw-r--r--drivers/rtc/rtc-m41t93.c225
-rw-r--r--drivers/rtc/rtc-mrst.c4
-rw-r--r--drivers/rtc/rtc-mxc.c6
-rw-r--r--drivers/rtc/rtc-pcf50633.c23
-rw-r--r--drivers/rtc/rtc-rv3029c2.c454
-rw-r--r--drivers/rtc/rtc-spear.c534
-rw-r--r--drivers/rtc/rtc-vt8500.c366
-rw-r--r--drivers/s390/block/dasd_diag.c6
-rw-r--r--drivers/s390/char/sclp.c7
-rw-r--r--drivers/s390/kvm/kvm_virtio.c3
-rw-r--r--drivers/scsi/aacraid/linit.c3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c2
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c4
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h1
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c11
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c26
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c27
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c13
-rw-r--r--drivers/scsi/fcoe/fcoe.c58
-rw-r--r--drivers/scsi/fcoe/fcoe.h10
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c133
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c40
-rw-r--r--drivers/scsi/in2000.c2
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/libfc/fc_disc.c1
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c16
-rw-r--r--drivers/scsi/libfc/fc_libfc.h1
-rw-r--r--drivers/scsi/libsas/sas_ata.c60
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_phy.c4
-rw-r--r--drivers/scsi/libsas/sas_port.c21
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c14
-rw-r--r--drivers/scsi/lpfc/lpfc.h43
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c312
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c2111
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h87
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c54
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h501
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c545
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c166
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c1659
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h33
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c93
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c83
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h4
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c102
-rw-r--r--drivers/scsi/osst.c6
-rw-r--r--drivers/scsi/pmcraid.c9
-rw-r--r--drivers/scsi/pmcraid.h1
-rw-r--r--drivers/scsi/qla4xxx/Makefile2
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c69
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h11
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h23
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c22
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c77
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c19
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c68
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_error.c87
-rw-r--r--drivers/scsi/scsi_trace.c4
-rw-r--r--drivers/scsi/sd.c82
-rw-r--r--drivers/scsi/ultrastor.c2
-rw-r--r--drivers/scsi/wd33c93.c7
-rw-r--r--drivers/staging/generic_serial/rio/rioinit.c2
-rw-r--r--drivers/target/loopback/tcm_loop.c25
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_device.c29
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/target/target_core_tmr.c7
-rw-r--r--drivers/target/target_core_transport.c68
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c20
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c8
-rw-r--r--drivers/tty/cyclades.c3
-rw-r--r--drivers/tty/nozomi.c3
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/m32r_sio.c3
-rw-r--r--drivers/usb/otg/twl6030-usb.c10
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c1
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/w1/masters/ds1wm.c321
-rw-r--r--drivers/w1/slaves/Kconfig20
-rw-r--r--drivers/w1/slaves/Makefile2
-rw-r--r--drivers/w1/slaves/w1_ds2408.c402
-rw-r--r--drivers/w1/slaves/w1_ds2780.c217
-rw-r--r--drivers/w1/slaves/w1_ds2780.h129
-rw-r--r--drivers/w1/w1.c12
-rw-r--r--drivers/w1/w1.h6
-rw-r--r--drivers/w1/w1_family.h2
-rw-r--r--drivers/w1/w1_io.c26
-rw-r--r--drivers/w1/w1_netlink.c5
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/acl.c2
-rw-r--r--fs/btrfs/btrfs_inode.h15
-rw-r--r--fs/btrfs/compression.c47
-rw-r--r--fs/btrfs/compression.h2
-rw-r--r--fs/btrfs/ctree.c51
-rw-r--r--fs/btrfs/ctree.h244
-rw-r--r--fs/btrfs/delayed-inode.c1695
-rw-r--r--fs/btrfs/delayed-inode.h141
-rw-r--r--fs/btrfs/delayed-ref.c114
-rw-r--r--fs/btrfs/delayed-ref.h6
-rw-r--r--fs/btrfs/dir-item.c39
-rw-r--r--fs/btrfs/disk-io.c210
-rw-r--r--fs/btrfs/disk-io.h19
-rw-r--r--fs/btrfs/export.c25
-rw-r--r--fs/btrfs/extent-tree.c1788
-rw-r--r--fs/btrfs/extent_io.c324
-rw-r--r--fs/btrfs/extent_io.h40
-rw-r--r--fs/btrfs/extent_map.c8
-rw-r--r--fs/btrfs/extent_map.h4
-rw-r--r--fs/btrfs/file-item.c38
-rw-r--r--fs/btrfs/file.c302
-rw-r--r--fs/btrfs/free-space-cache.c993
-rw-r--r--fs/btrfs/free-space-cache.h48
-rw-r--r--fs/btrfs/inode-item.c2
-rw-r--r--fs/btrfs/inode-map.c444
-rw-r--r--fs/btrfs/inode-map.h13
-rw-r--r--fs/btrfs/inode.c700
-rw-r--r--fs/btrfs/ioctl.c624
-rw-r--r--fs/btrfs/ioctl.h107
-rw-r--r--fs/btrfs/locking.c25
-rw-r--r--fs/btrfs/locking.h2
-rw-r--r--fs/btrfs/ref-cache.c164
-rw-r--r--fs/btrfs/ref-cache.h24
-rw-r--r--fs/btrfs/relocation.c67
-rw-r--r--fs/btrfs/root-tree.c61
-rw-r--r--fs/btrfs/scrub.c1369
-rw-r--r--fs/btrfs/super.c51
-rw-r--r--fs/btrfs/sysfs.c77
-rw-r--r--fs/btrfs/transaction.c196
-rw-r--r--fs/btrfs/transaction.h5
-rw-r--r--fs/btrfs/tree-defrag.c2
-rw-r--r--fs/btrfs/tree-log.c208
-rw-r--r--fs/btrfs/tree-log.h1
-rw-r--r--fs/btrfs/version.sh43
-rw-r--r--fs/btrfs/volumes.c657
-rw-r--r--fs/btrfs/volumes.h27
-rw-r--r--fs/btrfs/xattr.c12
-rw-r--r--fs/cifs/Kconfig20
-rw-r--r--fs/cifs/README3
-rw-r--r--fs/cifs/cache.c6
-rw-r--r--fs/cifs/cifs_debug.c26
-rw-r--r--fs/cifs/cifs_dfs_ref.c2
-rw-r--r--fs/cifs/cifs_fs_sb.h3
-rw-r--r--fs/cifs/cifs_spnego.c2
-rw-r--r--fs/cifs/cifs_spnego.h2
-rw-r--r--fs/cifs/cifsacl.c9
-rw-r--r--fs/cifs/cifsencrypt.c14
-rw-r--r--fs/cifs/cifsfs.c233
-rw-r--r--fs/cifs/cifsglob.h129
-rw-r--r--fs/cifs/cifsproto.h209
-rw-r--r--fs/cifs/cifssmb.c463
-rw-r--r--fs/cifs/connect.c625
-rw-r--r--fs/cifs/dir.c33
-rw-r--r--fs/cifs/file.c376
-rw-r--r--fs/cifs/fscache.c6
-rw-r--r--fs/cifs/fscache.h8
-rw-r--r--fs/cifs/inode.c92
-rw-r--r--fs/cifs/ioctl.c2
-rw-r--r--fs/cifs/link.c46
-rw-r--r--fs/cifs/misc.c32
-rw-r--r--fs/cifs/netmisc.c2
-rw-r--r--fs/cifs/readdir.c8
-rw-r--r--fs/cifs/sess.c42
-rw-r--r--fs/cifs/transport.c214
-rw-r--r--fs/cifs/xattr.c8
-rw-r--r--fs/dlm/main.c2
-rw-r--r--fs/ecryptfs/inode.c2
-rw-r--r--fs/ecryptfs/keystore.c46
-rw-r--r--fs/exec.c39
-rw-r--r--fs/gfs2/main.c2
-rw-r--r--fs/jffs2/dir.c4
-rw-r--r--fs/jffs2/scan.c19
-rw-r--r--fs/ncpfs/mmap.c2
-rw-r--r--fs/ocfs2/move_extents.c41
-rw-r--r--fs/partitions/check.c8
-rw-r--r--fs/partitions/efi.c9
-rw-r--r--fs/proc/array.c4
-rw-r--r--fs/proc/base.c83
-rw-r--r--fs/proc/stat.c6
-rw-r--r--fs/proc/task_mmu.c27
-rw-r--r--fs/proc/vmcore.c52
-rw-r--r--fs/squashfs/block.c2
-rw-r--r--fs/squashfs/cache.c31
-rw-r--r--fs/squashfs/decompressor.c2
-rw-r--r--fs/squashfs/decompressor.h2
-rw-r--r--fs/squashfs/dir.c2
-rw-r--r--fs/squashfs/export.c42
-rw-r--r--fs/squashfs/file.c2
-rw-r--r--fs/squashfs/fragment.c37
-rw-r--r--fs/squashfs/id.c42
-rw-r--r--fs/squashfs/inode.c2
-rw-r--r--fs/squashfs/namei.c2
-rw-r--r--fs/squashfs/squashfs.h10
-rw-r--r--fs/squashfs/squashfs_fs.h2
-rw-r--r--fs/squashfs/squashfs_fs_i.h2
-rw-r--r--fs/squashfs/squashfs_fs_sb.h2
-rw-r--r--fs/squashfs/super.c112
-rw-r--r--fs/squashfs/symlink.c2
-rw-r--r--fs/squashfs/xattr.c2
-rw-r--r--fs/squashfs/xattr.h3
-rw-r--r--fs/squashfs/xattr_id.c47
-rw-r--r--fs/squashfs/xz_wrapper.c2
-rw-r--r--fs/squashfs/zlib_wrapper.c2
-rw-r--r--fs/ufs/balloc.c9
-rw-r--r--fs/ufs/truncate.c2
-rw-r--r--include/asm-generic/bitops/find.h4
-rw-r--r--include/asm-generic/bitops/le.h7
-rw-r--r--include/asm-generic/bug.h40
-rw-r--r--include/asm-generic/ptrace.h74
-rw-r--r--include/asm-generic/unistd.h4
-rw-r--r--include/linux/bitops.h4
-rw-r--r--include/linux/cgroup.h13
-rw-r--r--include/linux/cgroup_subsys.h6
-rw-r--r--include/linux/crash_dump.h5
-rw-r--r--include/linux/cred.h2
-rw-r--r--include/linux/flex_array.h2
-rw-r--r--include/linux/if_ether.h4
-rw-r--r--include/linux/init_task.h9
-rw-r--r--include/linux/ipmi_smi.h2
-rw-r--r--include/linux/key.h2
-rw-r--r--include/linux/memcontrol.h24
-rw-r--r--include/linux/mfd/max8997-private.h4
-rw-r--r--include/linux/mfd/tps65910.h800
-rw-r--r--include/linux/mm.h10
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmzone.h7
-rw-r--r--include/linux/mtd/mtd.h17
-rw-r--r--include/linux/mtd/nand.h4
-rw-r--r--include/linux/mtd/partitions.h16
-rw-r--r--include/linux/mtd/physmap.h4
-rw-r--r--include/linux/net.h6
-rw-r--r--include/linux/netfilter.h1
-rw-r--r--include/linux/netfilter/ipset/ip_set_ahash.h4
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h18
-rw-r--r--include/linux/nsproxy.h9
-rw-r--r--include/linux/pid.h2
-rw-r--r--include/linux/power/isp1704_charger.h (renamed from arch/arm/mach-s5p6442/include/mach/dma.h)19
-rw-r--r--include/linux/power/max8903_charger.h57
-rw-r--r--include/linux/proc_fs.h19
-rw-r--r--include/linux/ratelimit.h40
-rw-r--r--include/linux/regulator/machine.h7
-rw-r--r--include/linux/rtc.h8
-rw-r--r--include/linux/sched.h36
-rw-r--r--include/linux/swap.h3
-rw-r--r--include/linux/vm_event_item.h64
-rw-r--r--include/linux/vmstat.h62
-rw-r--r--include/media/m5mols.h35
-rw-r--r--include/media/videobuf-dvb.h4
-rw-r--r--include/net/ip_vs.h3
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/net_ratelimit.h8
-rw-r--r--include/scsi/libsas.h1
-rw-r--r--include/scsi/scsi_tcq.h1
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/target/target_core_fabric_ops.h1
-rw-r--r--include/target/target_core_transport.h1
-rw-r--r--init/Kconfig8
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/cgroup.c587
-rw-r--r--kernel/cgroup_freezer.c26
-rw-r--r--kernel/cpuset.c103
-rw-r--r--kernel/cred.c2
-rw-r--r--kernel/fork.c68
-rw-r--r--kernel/ns_cgroup.c118
-rw-r--r--kernel/nsproxy.c4
-rw-r--r--kernel/pm_qos_params.c33
-rw-r--r--kernel/power/hibernate.c220
-rw-r--r--kernel/profile.c16
-rw-r--r--kernel/sched.c38
-rw-r--r--lib/Kconfig10
-rw-r--r--lib/Makefile8
-rw-r--r--lib/find_last_bit.c4
-rw-r--r--lib/find_next_bit.c18
-rw-r--r--lib/flex_array.c51
-rw-r--r--mm/filemap.c1
-rw-r--r--mm/memcontrol.c364
-rw-r--r--mm/memory.c2
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/page_cgroup.c28
-rw-r--r--mm/shmem.c11
-rw-r--r--mm/vmalloc.c4
-rw-r--r--mm/vmscan.c104
-rw-r--r--net/8021q/vlan.c5
-rw-r--r--net/atm/atm_sysfs.c10
-rw-r--r--net/atm/lec.c2
-rw-r--r--net/atm/mpc.c2
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/can/proc.c7
-rw-r--r--net/core/ethtool.c25
-rw-r--r--net/core/filter.c1
-rw-r--r--net/core/sysctl_net_core.c1
-rw-r--r--net/core/utils.c1
-rw-r--r--net/ipv4/inetpeer.c42
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/netfilter/ipset/ip_set_core.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c27
-rw-r--r--scripts/selinux/README2
-rw-r--r--security/apparmor/match.c2
-rw-r--r--security/apparmor/policy_unpack.c4
-rw-r--r--security/device_cgroup.c3
-rw-r--r--security/keys/encrypted.c2
-rw-r--r--security/keys/process_keys.c1
-rw-r--r--security/keys/request_key.c2
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/keys/trusted.c2
-rw-r--r--security/selinux/avc.c12
-rw-r--r--security/selinux/ss/services.c3
-rw-r--r--sound/core/control.c3
-rw-r--r--sound/core/init.c3
-rw-r--r--sound/core/oss/linear.c3
-rw-r--r--sound/core/pcm_lib.c17
-rw-r--r--sound/core/pcm_native.c21
-rw-r--r--sound/core/seq/seq_queue.c2
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_eld.c21
-rw-r--r--sound/pci/hda/hda_intel.c206
-rw-r--r--sound/pci/hda/patch_analog.c1
-rw-r--r--sound/pci/hda/patch_conexant.c42
-rw-r--r--sound/pci/hda/patch_hdmi.c123
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c2
-rw-r--r--sound/soc/codecs/wm1250-ev1.c2
-rw-r--r--sound/soc/codecs/wm8731.c2
-rw-r--r--sound/soc/codecs/wm8915.c1
-rw-r--r--sound/soc/pxa/raumfeld.c92
-rw-r--r--sound/soc/samsung/Kconfig4
-rw-r--r--sound/soc/samsung/smdk_wm8580.c2
-rw-r--r--sound/soc/soc-core.c8
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--sound/usb/card.c17
-rw-r--r--sound/usb/mixer.c32
-rw-r--r--sound/usb/mixer.h14
-rw-r--r--sound/usb/mixer_quirks.c70
-rw-r--r--sound/usb/quirks-table.h4
-rw-r--r--sound/usb/quirks.c18
-rw-r--r--sound/usb/usbaudio.h1
955 files changed, 31392 insertions, 17802 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 1b777b960492..1f89424c36a6 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -192,10 +192,6 @@ kernel-docs.txt
 	- listing of various WWW + books that document kernel internals.
 kernel-parameters.txt
 	- summary listing of command line / boot prompt args for the kernel.
-keys-request-key.txt
-	- description of the kernel key request service.
-keys.txt
-	- description of the kernel key retention service.
 kobject.txt
 	- info of the kobject infrastructure of the Linux kernel.
 kprobes.txt
@@ -294,6 +290,8 @@ scheduler/
 	- directory with info on the scheduler.
 scsi/
 	- directory with info on Linux scsi support.
+security/
+	- directory that contains security-related info
 serial/
 	- directory with info on the low level serial API.
 serial-console.txt
diff --git a/Documentation/DocBook/dvb/dvbproperty.xml b/Documentation/DocBook/dvb/dvbproperty.xml
index 52d5e3c7cf6c..b5365f61d69b 100644
--- a/Documentation/DocBook/dvb/dvbproperty.xml
+++ b/Documentation/DocBook/dvb/dvbproperty.xml
@@ -141,13 +141,15 @@ struct dtv_properties {
 </row></tbody></tgroup></informaltable>
 </section>
 
+<section>
+	<title>Property types</title>
 <para>
 On <link linkend="FE_GET_PROPERTY">FE_GET_PROPERTY</link>/<link linkend="FE_SET_PROPERTY">FE_SET_PROPERTY</link>,
 the actual action is determined by the dtv_property cmd/data pairs. With one single ioctl, is possible to
 get/set up to 64 properties. The actual meaning of each property is described on the next sections.
 </para>
 
-<para>The Available frontend property types are:</para>
+<para>The available frontend property types are:</para>
 <programlisting>
 #define DTV_UNDEFINED		0
 #define DTV_TUNE		1
@@ -193,6 +195,7 @@ get/set up to 64 properties. The actual meaning of each property is described on
 #define DTV_ISDBT_LAYER_ENABLED	41
 #define DTV_ISDBS_TS_ID		42
 </programlisting>
+</section>
 
 <section id="fe_property_common">
 	<title>Parameters that are common to all Digital TV standards</title>
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index c8abb23ef1e7..e5fe09430fd9 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -293,6 +293,7 @@
 <!ENTITY sub-yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
 <!ENTITY sub-yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
 <!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
+<!ENTITY sub-srggb12 SYSTEM "v4l/pixfmt-srggb12.xml">
 <!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
 <!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml">
 <!ENTITY sub-y12 SYSTEM "v4l/pixfmt-y12.xml">
@@ -373,9 +374,9 @@
 <!ENTITY sub-media-indices SYSTEM "media-indices.tmpl">
 
 <!ENTITY sub-media-controller SYSTEM "v4l/media-controller.xml">
-<!ENTITY sub-media-open SYSTEM "v4l/media-func-open.xml">
-<!ENTITY sub-media-close SYSTEM "v4l/media-func-close.xml">
-<!ENTITY sub-media-ioctl SYSTEM "v4l/media-func-ioctl.xml">
+<!ENTITY sub-media-func-open SYSTEM "v4l/media-func-open.xml">
+<!ENTITY sub-media-func-close SYSTEM "v4l/media-func-close.xml">
+<!ENTITY sub-media-func-ioctl SYSTEM "v4l/media-func-ioctl.xml">
 <!ENTITY sub-media-ioc-device-info SYSTEM "v4l/media-ioc-device-info.xml">
 <!ENTITY sub-media-ioc-enum-entities SYSTEM "v4l/media-ioc-enum-entities.xml">
 <!ENTITY sub-media-ioc-enum-links SYSTEM "v4l/media-ioc-enum-links.xml">
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 6f242d5dee9a..17910e2052ad 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -189,8 +189,7 @@ static void __iomem *baseaddr;
 	<title>Partition defines</title>
 	<para>
 		If you want to divide your device into partitions, then
-		enable the configuration switch CONFIG_MTD_PARTITIONS and define
-		a partitioning scheme suitable to your board.
+		define a partitioning scheme suitable to your board.
 	</para>
 	<programlisting>
 #define NUM_PARTITIONS 2
diff --git a/Documentation/DocBook/v4l/media-controller.xml b/Documentation/DocBook/v4l/media-controller.xml
index 2dc25e1d4089..873ac3a621f0 100644
--- a/Documentation/DocBook/v4l/media-controller.xml
+++ b/Documentation/DocBook/v4l/media-controller.xml
@@ -78,9 +78,9 @@
 <appendix id="media-user-func">
   <title>Function Reference</title>
   <!-- Keep this alphabetically sorted. -->
-  &sub-media-open;
-  &sub-media-close;
-  &sub-media-ioctl;
+  &sub-media-func-open;
+  &sub-media-func-close;
+  &sub-media-func-ioctl;
   <!-- All ioctls go here. -->
   &sub-media-ioc-device-info;
   &sub-media-ioc-enum-entities;
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index dbfe3b08435f..deb660207f94 100644
--- a/Documentation/DocBook/v4l/pixfmt.xml
+++ b/Documentation/DocBook/v4l/pixfmt.xml
@@ -673,6 +673,7 @@ access the palette, this must be done with ioctls of the Linux framebuffer API.<
     &sub-srggb8;
     &sub-sbggr16;
     &sub-srggb10;
+    &sub-srggb12;
   </section>
 
   <section id="yuv-formats">
diff --git a/Documentation/DocBook/v4l/subdev-formats.xml b/Documentation/DocBook/v4l/subdev-formats.xml
index a26b10c07857..8d3409d2c632 100644
--- a/Documentation/DocBook/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/v4l/subdev-formats.xml
@@ -2531,13 +2531,13 @@
     <constant>_JPEG</constant> prefix the format code is made of
     the following information.
     <itemizedlist>
-      <listitem>The number of bus samples per entropy encoded byte.</listitem>
-      <listitem>The bus width.</listitem>
+      <listitem><para>The number of bus samples per entropy encoded byte.</para></listitem>
+      <listitem><para>The bus width.</para></listitem>
     </itemizedlist>
+  </para>
 
   <para>For instance, for a JPEG baseline process and an 8-bit bus width
   the format will be named <constant>V4L2_MBUS_FMT_JPEG_1X8</constant>.
-  </para>
   </para>
 
   <para>The following table lists existing JPEG compressed formats.</para>
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index e9c77788a39d..f6318f6d7baf 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -177,6 +177,8 @@ static int get_family_id(int sd)
 	rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
 			CTRL_ATTR_FAMILY_NAME, (void *)name,
 			strlen(TASKSTATS_GENL_NAME)+1);
+	if (rc < 0)
+		return 0;	/* sendto() failure? */
 
 	rep_len = recv(sd, &ans, sizeof(ans), 0);
 	if (ans.n.nlmsg_type == NLMSG_ERROR ||
@@ -191,30 +193,37 @@ static int get_family_id(int sd)
 	return id;
 }
 
+#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
+
 static void print_delayacct(struct taskstats *t)
 {
-	printf("\n\nCPU %15s%15s%15s%15s\n"
-	       " %15llu%15llu%15llu%15llu\n"
-	       "IO %15s%15s\n"
-	       " %15llu%15llu\n"
-	       "SWAP %15s%15s\n"
-	       " %15llu%15llu\n"
-	       "RECLAIM %12s%15s\n"
-	       " %15llu%15llu\n",
-	       "count", "real total", "virtual total", "delay total",
+	printf("\n\nCPU %15s%15s%15s%15s%15s\n"
+	       " %15llu%15llu%15llu%15llu%15.3fms\n"
+	       "IO %15s%15s%15s\n"
+	       " %15llu%15llu%15llums\n"
+	       "SWAP %15s%15s%15s\n"
+	       " %15llu%15llu%15llums\n"
+	       "RECLAIM %12s%15s%15s\n"
+	       " %15llu%15llu%15llums\n",
+	       "count", "real total", "virtual total",
+	       "delay total", "delay average",
 	       (unsigned long long)t->cpu_count,
 	       (unsigned long long)t->cpu_run_real_total,
 	       (unsigned long long)t->cpu_run_virtual_total,
 	       (unsigned long long)t->cpu_delay_total,
-	       "count", "delay total",
+	       average_ms((double)t->cpu_delay_total, t->cpu_count),
+	       "count", "delay total", "delay average",
 	       (unsigned long long)t->blkio_count,
 	       (unsigned long long)t->blkio_delay_total,
-	       "count", "delay total",
+	       average_ms(t->blkio_delay_total, t->blkio_count),
+	       "count", "delay total", "delay average",
 	       (unsigned long long)t->swapin_count,
 	       (unsigned long long)t->swapin_delay_total,
-	       "count", "delay total",
+	       average_ms(t->swapin_delay_total, t->swapin_count),
+	       "count", "delay total", "delay average",
 	       (unsigned long long)t->freepages_count,
-	       (unsigned long long)t->freepages_delay_total);
+	       (unsigned long long)t->freepages_delay_total,
+	       average_ms(t->freepages_delay_total, t->freepages_count));
 }
 
 static void task_context_switch_counts(struct taskstats *t)
@@ -433,8 +442,6 @@ int main(int argc, char *argv[])
 	}
 
 	do {
-		int i;
-
 		rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
 		PRINTF("received %d bytes\n", rep_len);
 
@@ -459,7 +466,6 @@ int main(int argc, char *argv[])
 
 		na = (struct nlattr *) GENLMSG_DATA(&msg);
 		len = 0;
-		i = 0;
 		while (len < rep_len) {
 			len += NLA_ALIGN(na->nla_len);
 			switch (na->nla_type) {
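
[Editor's note, not part of the patch: a minimal standalone sketch of how the average_ms() helper added to getdelays.c above behaves. The macro is copied verbatim from the hunk; the delay and count values below are hypothetical.]

/*
 * Minimal sketch, not part of the patch: exercises the average_ms()
 * macro added to getdelays.c above.  The sample values are made up.
 */
#include <stdio.h>

#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))

int main(void)
{
	unsigned long long delay_total = 7500000ULL;	/* 7.5 ms, in nanoseconds */
	unsigned long long count = 3;

	/* Integer average, as printed for the IO/SWAP/RECLAIM rows: "2ms" */
	printf("%llums\n", average_ms(delay_total, count));

	/* Double average, as printed for the CPU row: "2.500ms" */
	printf("%.3fms\n", average_ms((double)delay_total, count));

	/* A zero count divides by 1 instead of faulting: "7ms" */
	printf("%llums\n", average_ms(delay_total, 0ULL));

	return 0;
}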
diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting
index 76850295af8f..4e686a2ed91e 100644
--- a/Documentation/arm/Booting
+++ b/Documentation/arm/Booting
@@ -65,13 +65,19 @@ looks at the connected hardware is beyond the scope of this document.
 The boot loader must ultimately be able to provide a MACH_TYPE_xxx
 value to the kernel. (see linux/arch/arm/tools/mach-types).
 
-
-4. Setup the kernel tagged list
--------------------------------
+4. Setup boot data
+------------------
 
 Existing boot loaders:	OPTIONAL, HIGHLY RECOMMENDED
 New boot loaders:	MANDATORY
 
+The boot loader must provide either a tagged list or a dtb image for
+passing configuration data to the kernel. The physical address of the
+boot data is passed to the kernel in register r2.
+
+4a. Setup the kernel tagged list
+--------------------------------
+
 The boot loader must create and initialise the kernel tagged list.
 A valid tagged list starts with ATAG_CORE and ends with ATAG_NONE.
 The ATAG_CORE tag may or may not be empty. An empty ATAG_CORE tag
@@ -101,6 +107,24 @@ The tagged list must be placed in a region of memory where neither
 the kernel decompressor nor initrd 'bootp' program will overwrite
 it. The recommended placement is in the first 16KiB of RAM.
 
+4b. Setup the device tree
+-------------------------
+
+The boot loader must load a device tree image (dtb) into system ram
+at a 64bit aligned address and initialize it with the boot data. The
+dtb format is documented in Documentation/devicetree/booting-without-of.txt.
+The kernel will look for the dtb magic value of 0xd00dfeed at the dtb
+physical address to determine if a dtb has been passed instead of a
+tagged list.
+
+The boot loader must pass at a minimum the size and location of the
+system memory, and the root filesystem location. The dtb must be
+placed in a region of memory where the kernel decompressor will not
+overwrite it. The recommended placement is in the first 16KiB of RAM
+with the caveat that it may not be located at physical address 0 since
+the kernel interprets a value of 0 in r2 to mean neither a tagged list
+nor a dtb were passed.
+
 5. Calling the kernel image
 ---------------------------
 
@@ -125,7 +149,8 @@ In either case, the following conditions must be met:
 - CPU register settings
   r0 = 0,
   r1 = machine type number discovered in (3) above.
-  r2 = physical address of tagged list in system RAM.
+  r2 = physical address of tagged list in system RAM, or
+       physical address of device tree block (dtb) in system RAM
 
 - CPU mode
   All forms of interrupts must be disabled (IRQs and FIQs)
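
[Editor's note, not part of the patch: a hedged C sketch of the dtb-versus-tagged-list check described in the Booting hunks above, i.e. looking for the big-endian magic 0xd00dfeed at the boot-data address passed in r2. The function and variable names are illustrative, __builtin_bswap32 is a GCC/Clang builtin, and a little-endian CPU is assumed.]

/*
 * Sketch only, not part of the patch: distinguish a dtb from a tagged
 * list at the boot-data address handed over in r2, by checking for the
 * big-endian magic described above.  Assumes a little-endian CPU; the
 * names here are made up for illustration.
 */
#include <stdint.h>
#include <string.h>

#define DTB_MAGIC	0xd00dfeedU	/* stored big-endian in the dtb header */

static int boot_data_is_dtb(const void *boot_data)
{
	uint32_t first_word;

	if (!boot_data)		/* r2 == 0: neither a tagged list nor a dtb */
		return 0;

	/* Read the first 32-bit word and byte-swap it on a little-endian core. */
	memcpy(&first_word, boot_data, sizeof(first_word));
	return __builtin_bswap32(first_word) == DTB_MAGIC;
}

int main(void)
{
	/* Fake in-memory boot data whose first word is the dtb magic. */
	const unsigned char fake_dtb[4] = { 0xd0, 0x0d, 0xfe, 0xed };

	return boot_data_is_dtb(fake_dtb) ? 0 : 1;
}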
diff --git a/Documentation/arm/Samsung/Overview.txt b/Documentation/arm/Samsung/Overview.txt
index c3094ea51aa7..658abb258cef 100644
--- a/Documentation/arm/Samsung/Overview.txt
+++ b/Documentation/arm/Samsung/Overview.txt
@@ -14,7 +14,6 @@ Introduction
   - S3C24XX: See Documentation/arm/Samsung-S3C24XX/Overview.txt for full list
   - S3C64XX: S3C6400 and S3C6410
   - S5P6440
-  - S5P6442
   - S5PC100
   - S5PC110 / S5PV210
 
@@ -36,7 +35,6 @@ Configuration
   unifying all the SoCs into one kernel.
 
   s5p6440_defconfig - S5P6440 specific default configuration
-  s5p6442_defconfig - S5P6442 specific default configuration
   s5pc100_defconfig - S5PC100 specific default configuration
   s5pc110_defconfig - S5PC110 specific default configuration
   s5pv210_defconfig - S5PV210 specific default configuration
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index ac4d47187122..3bd585b44927 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -12,7 +12,7 @@ Also, it should be made opaque such that any kind of cast to a normal
12C integer type will fail. Something like the following should 12C integer type will fail. Something like the following should
13suffice: 13suffice:
14 14
15 typedef struct { volatile int counter; } atomic_t; 15 typedef struct { int counter; } atomic_t;
16 16
17Historically, counter has been declared volatile. This is now discouraged. 17Historically, counter has been declared volatile. This is now discouraged.
18See Documentation/volatile-considered-harmful.txt for the complete rationale. 18See Documentation/volatile-considered-harmful.txt for the complete rationale.
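For orientation, the counter is only ever manipulated through the accessor API, never by touching .counter directly (a minimal sketch; the foo_* names are illustrative):

	#include <linux/atomic.h>	/* <asm/atomic.h> on older trees */
	#include <linux/kernel.h>

	static atomic_t foo_refs = ATOMIC_INIT(1);

	static void foo_get(void)
	{
		atomic_inc(&foo_refs);			/* take a reference */
	}

	static void foo_put(void)
	{
		if (atomic_dec_and_test(&foo_refs))	/* drop it; true on zero */
			pr_info("last foo reference dropped\n");
	}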
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index aedf1bd02fdd..0ed99f08f1f3 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -236,7 +236,8 @@ containing the following files describing that cgroup:
236 - cgroup.procs: list of tgids in the cgroup. This list is not 236 - cgroup.procs: list of tgids in the cgroup. This list is not
237 guaranteed to be sorted or free of duplicate tgids, and userspace 237 guaranteed to be sorted or free of duplicate tgids, and userspace
238 should sort/uniquify the list if this property is required. 238 should sort/uniquify the list if this property is required.
239 This is a read-only file, for now. 239 Writing a thread group id into this file moves all threads in that
240 group into this cgroup.
240 - notify_on_release flag: run the release agent on exit? 241 - notify_on_release flag: run the release agent on exit?
241 - release_agent: the path to use for release notifications (this file 242 - release_agent: the path to use for release notifications (this file
242 exists in the top cgroup only) 243 exists in the top cgroup only)
@@ -430,6 +431,12 @@ You can attach the current shell task by echoing 0:
430 431
431# echo 0 > tasks 432# echo 0 > tasks
432 433
434You can use the cgroup.procs file instead of the tasks file to move all
435threads in a threadgroup at once. Echoing the pid of any task in a
436threadgroup to cgroup.procs causes all tasks in that threadgroup to be
437attached to the cgroup. Writing 0 to cgroup.procs moves all tasks
438in the writing task's threadgroup.
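A minimal sketch of driving this from user space (the /dev/cgroup/foo mount point and the helper name are illustrative assumptions; writing the tgid of any thread moves the whole threadgroup, as described above):

	#include <stdio.h>
	#include <unistd.h>

	/* Move the calling process (every thread in its threadgroup) into the
	 * cgroup assumed to be mounted at /dev/cgroup/foo. */
	static int join_cgroup_foo(void)
	{
		FILE *f = fopen("/dev/cgroup/foo/cgroup.procs", "w");

		if (!f)
			return -1;
		fprintf(f, "%d\n", (int)getpid());	/* or "0" for the writing task's group */
		return fclose(f);
	}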
439
433Note: Since every task is always a member of exactly one cgroup in each 440Note: Since every task is always a member of exactly one cgroup in each
434mounted hierarchy, to remove a task from its current cgroup you must 441mounted hierarchy, to remove a task from its current cgroup you must
435move it into a new cgroup (possibly the root cgroup) by writing to the 442move it into a new cgroup (possibly the root cgroup) by writing to the
@@ -575,7 +582,7 @@ rmdir() will fail with it. From this behavior, pre_destroy() can be
575called multiple times against a cgroup. 582called multiple times against a cgroup.
576 583
577int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 584int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
578 struct task_struct *task, bool threadgroup) 585 struct task_struct *task)
579(cgroup_mutex held by caller) 586(cgroup_mutex held by caller)
580 587
581Called prior to moving a task into a cgroup; if the subsystem 588Called prior to moving a task into a cgroup; if the subsystem
@@ -584,9 +591,14 @@ task is passed, then a successful result indicates that *any*
584unspecified task can be moved into the cgroup. Note that this isn't 591unspecified task can be moved into the cgroup. Note that this isn't
585called on a fork. If this method returns 0 (success) then this should 592called on a fork. If this method returns 0 (success) then this should
586remain valid while the caller holds cgroup_mutex and it is ensured that either 593remain valid while the caller holds cgroup_mutex and it is ensured that either
587attach() or cancel_attach() will be called in future. If threadgroup is 594attach() or cancel_attach() will be called in future.
588true, then a successful result indicates that all threads in the given 595
589thread's threadgroup can be moved together. 596int can_attach_task(struct cgroup *cgrp, struct task_struct *tsk);
597(cgroup_mutex held by caller)
598
599As can_attach, but for operations that must be run once per task to be
600attached (possibly many when using cgroup_attach_proc). Called after
601can_attach.
590 602
591void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 603void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
592 struct task_struct *task, bool threadgroup) 604 struct task_struct *task, bool threadgroup)
@@ -598,15 +610,24 @@ function, so that the subsystem can implement a rollback. If not, not necessary.
598This will be called only for subsystems whose can_attach() operation has 610This will be called only for subsystems whose can_attach() operation has
599succeeded. 611succeeded.
600 612
613void pre_attach(struct cgroup *cgrp);
614(cgroup_mutex held by caller)
615
616For any non-per-thread attachment work that needs to happen before
617attach_task. Needed by cpuset.
618
601void attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 619void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
602 struct cgroup *old_cgrp, struct task_struct *task, 620 struct cgroup *old_cgrp, struct task_struct *task)
603 bool threadgroup)
604(cgroup_mutex held by caller) 621(cgroup_mutex held by caller)
605 622
606Called after the task has been attached to the cgroup, to allow any 623Called after the task has been attached to the cgroup, to allow any
607post-attachment activity that requires memory allocations or blocking. 624post-attachment activity that requires memory allocations or blocking.
608If threadgroup is true, the subsystem should take care of all threads 625
609in the specified thread's threadgroup. Currently does not support any 626void attach_task(struct cgroup *cgrp, struct task_struct *tsk);
627(cgroup_mutex held by caller)
628
629As attach, but for operations that must be run once per task to be attached,
630like can_attach_task. Called before attach. Currently does not support any
610subsystem that might need the old_cgrp for every thread in the group. 631subsystem that might need the old_cgrp for every thread in the group.
611 632
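Pulled together, a subsystem wiring up the split per-group/per-task hooks might look roughly like this (a sketch under the signatures quoted above; the "foo" subsystem, its helpers, and the assumption that the struct fields mirror the callback names are all illustrative):

	#include <linux/cgroup.h>

	static int foo_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
	{
		/* Per-task admission check, run once for every task being moved
		 * (many times when a whole threadgroup is attached). */
		return 0;
	}

	static void foo_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
	{
		/* Per-task attach work; runs before the final attach() callback. */
	}

	/* Other mandatory fields (create, destroy, subsys_id, ...) omitted here. */
	struct cgroup_subsys foo_subsys = {
		.name            = "foo",
		.can_attach_task = foo_can_attach_task,
		.attach_task     = foo_attach_task,
	};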
612void fork(struct cgroup_subsys *ss, struct task_struct *task) 633void fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -630,7 +651,7 @@ always handled well.
630void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp) 651void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp)
631(cgroup_mutex held by caller) 652(cgroup_mutex held by caller)
632 653
633Called at the end of cgroup_clone() to do any parameter 654Called during cgroup_create() to do any parameter
634initialization which might be required before a task could attach. For 655initialization which might be required before a task could attach. For
635example in cpusets, no task may attach before 'cpus' and 'mems' are set 656example in cpusets, no task may attach before 'cpus' and 'mems' are set
636up. 657up.
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index 50619a0720a8..7c1329de0596 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -12,8 +12,9 @@ Table of Contents
12================= 12=================
13 13
14 I - Introduction 14 I - Introduction
15 1) Entry point for arch/powerpc 15 1) Entry point for arch/arm
16 2) Entry point for arch/x86 16 2) Entry point for arch/powerpc
17 3) Entry point for arch/x86
17 18
18 II - The DT block format 19 II - The DT block format
19 1) Header 20 1) Header
@@ -148,7 +149,46 @@ upgrades without significantly impacting the kernel code or cluttering
148it with special cases. 149it with special cases.
149 150
150 151
1511) Entry point for arch/powerpc 1521) Entry point for arch/arm
153---------------------------
154
155 There is one single entry point to the kernel, at the start
156 of the kernel image. That entry point supports two calling
157 conventions. A summary of the interface is described here. A full
158 description of the boot requirements is documented in
159 Documentation/arm/Booting
160
161 a) ATAGS interface. Minimal information is passed from firmware
162 to the kernel with a tagged list of predefined parameters.
163
164 r0 : 0
165
166 r1 : Machine type number
167
168 r2 : Physical address of tagged list in system RAM
169
170 b) Entry with a flattened device-tree block. Firmware loads the
171  physical address of the flattened device tree block (dtb) into r2;
172  r1 is not used, but it is considered good practice to use a valid
173 machine number as described in Documentation/arm/Booting.
174
175 r0 : 0
176
177 r1 : Valid machine type number. When using a device tree,
178 a single machine type number will often be assigned to
179 represent a class or family of SoCs.
180
181 r2 : physical pointer to the device-tree block
182 (defined in chapter II) in RAM. Device tree can be located
183 anywhere in system RAM, but it should be aligned on a 64 bit
184 boundary.
185
186 The kernel will differentiate between ATAGS and device tree booting by
187 reading the memory pointed to by r2 and looking for either the flattened
188 device tree block magic value (0xd00dfeed) or the ATAG_CORE value at
189 offset 0x4 from r2 (0x54410001).
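
  The check described in the previous paragraph can be sketched as follows
  (the dtb magic is stored big-endian in memory, so it must be byte-swapped
  on a little-endian CPU; the helper names are illustrative, not the
  kernel's actual implementation):

	#include <asm/byteorder.h>

	#define OF_DT_MAGIC	0xd00dfeed	/* flattened device tree magic */
	#define ATAG_CORE	0x54410001

	/* 'base' is the memory that r2 pointed at. */
	static int points_at_dtb(const unsigned int *base)
	{
		return be32_to_cpu(base[0]) == OF_DT_MAGIC;
	}

	static int points_at_atags(const unsigned int *base)
	{
		/* The tag id sits at offset 0x4, after the 4-byte size word. */
		return base[1] == ATAG_CORE;
	}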
190
1912) Entry point for arch/powerpc
152------------------------------- 192-------------------------------
153 193
154 There is one single entry point to the kernel, at the start 194 There is one single entry point to the kernel, at the start
@@ -226,7 +266,7 @@ it with special cases.
226 cannot support both configurations with Book E and configurations 266 cannot support both configurations with Book E and configurations
227 with classic Powerpc architectures. 267 with classic Powerpc architectures.
228 268
2292) Entry point for arch/x86 2693) Entry point for arch/x86
230------------------------------- 270-------------------------------
231 271
232 There is one single 32bit entry point to the kernel at code32_start, 272 There is one single 32bit entry point to the kernel at code32_start,
diff --git a/Documentation/filesystems/configfs/configfs_example_explicit.c b/Documentation/filesystems/configfs/configfs_example_explicit.c
index fd53869f5633..1420233dfa55 100644
--- a/Documentation/filesystems/configfs/configfs_example_explicit.c
+++ b/Documentation/filesystems/configfs/configfs_example_explicit.c
@@ -464,9 +464,8 @@ static int __init configfs_example_init(void)
464 return 0; 464 return 0;
465 465
466out_unregister: 466out_unregister:
467 for (; i >= 0; i--) { 467 for (i--; i >= 0; i--)
468 configfs_unregister_subsystem(example_subsys[i]); 468 configfs_unregister_subsystem(example_subsys[i]);
469 }
470 469
471 return ret; 470 return ret;
472} 471}
@@ -475,9 +474,8 @@ static void __exit configfs_example_exit(void)
475{ 474{
476 int i; 475 int i;
477 476
478 for (i = 0; example_subsys[i]; i++) { 477 for (i = 0; example_subsys[i]; i++)
479 configfs_unregister_subsystem(example_subsys[i]); 478 configfs_unregister_subsystem(example_subsys[i]);
480 }
481} 479}
482 480
483module_init(configfs_example_init); 481module_init(configfs_example_init);
diff --git a/Documentation/filesystems/configfs/configfs_example_macros.c b/Documentation/filesystems/configfs/configfs_example_macros.c
index d8e30a0378aa..327dfbc640a9 100644
--- a/Documentation/filesystems/configfs/configfs_example_macros.c
+++ b/Documentation/filesystems/configfs/configfs_example_macros.c
@@ -427,9 +427,8 @@ static int __init configfs_example_init(void)
427 return 0; 427 return 0;
428 428
429out_unregister: 429out_unregister:
430 for (; i >= 0; i--) { 430 for (i--; i >= 0; i--)
431 configfs_unregister_subsystem(example_subsys[i]); 431 configfs_unregister_subsystem(example_subsys[i]);
432 }
433 432
434 return ret; 433 return ret;
435} 434}
@@ -438,9 +437,8 @@ static void __exit configfs_example_exit(void)
438{ 437{
439 int i; 438 int i;
440 439
441 for (i = 0; example_subsys[i]; i++) { 440 for (i = 0; example_subsys[i]; i++)
442 configfs_unregister_subsystem(example_subsys[i]); 441 configfs_unregister_subsystem(example_subsys[i]);
443 }
444} 442}
445 443
446module_init(configfs_example_init); 444module_init(configfs_example_init);
diff --git a/Documentation/filesystems/nfs/idmapper.txt b/Documentation/filesystems/nfs/idmapper.txt
index b9b4192ea8b5..9c8fd6148656 100644
--- a/Documentation/filesystems/nfs/idmapper.txt
+++ b/Documentation/filesystems/nfs/idmapper.txt
@@ -47,8 +47,8 @@ request-key will find the first matching line and corresponding program. In
47this case, /some/other/program will handle all uid lookups and 47this case, /some/other/program will handle all uid lookups and
48/usr/sbin/nfs.idmap will handle gid, user, and group lookups. 48/usr/sbin/nfs.idmap will handle gid, user, and group lookups.
49 49
50See <file:Documentation/keys-request-key.txt> for more information about the 50See <file:Documentation/security/keys-request-key.txt> for more information
51request-key function. 51about the request-key function.
52 52
53 53
54========= 54=========
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index 04ca06325b08..7f531ad83285 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -139,8 +139,8 @@ the key will be discarded and recreated when the data it holds has expired.
139dns_query() returns a copy of the value attached to the key, or an error if 139dns_query() returns a copy of the value attached to the key, or an error if
140that is indicated instead. 140that is indicated instead.
141 141
142See <file:Documentation/keys-request-key.txt> for further information about 142See <file:Documentation/security/keys-request-key.txt> for further
143request-key function. 143information about request-key function.
144 144
145 145
146========= 146=========
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index bdec39b9bd75..b42419b52e44 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -53,11 +53,11 @@ static struct regulator_init_data regulator1_data = {
53 53
54Regulator-1 supplies power to Regulator-2. This relationship must be registered 54Regulator-1 supplies power to Regulator-2. This relationship must be registered
55with the core so that Regulator-1 is also enabled when Consumer A enables its 55with the core so that Regulator-1 is also enabled when Consumer A enables its
56supply (Regulator-2). The supply regulator is set by the supply_regulator_dev 56supply (Regulator-2). The supply regulator is set by the supply_regulator
57field below:- 57field below:-
58 58
59static struct regulator_init_data regulator2_data = { 59static struct regulator_init_data regulator2_data = {
60 .supply_regulator_dev = &platform_regulator1_device.dev, 60 .supply_regulator = "regulator_name",
61 .constraints = { 61 .constraints = {
62 .min_uV = 1800000, 62 .min_uV = 1800000,
63 .max_uV = 2000000, 63 .max_uV = 2000000,
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 4d9ce73ff730..9ed1d9d96783 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,17 @@
1Release Date : Wed. May 11, 2011 17:00:00 PST 2010 -
2 (emaild-id:megaraidlinux@lsi.com)
3 Adam Radford
4Current Version : 00.00.05.38-rc1
5Old Version : 00.00.05.34-rc1
6 1. Remove MSI-X black list, use MFI_REG_STATE.ready.msiEnable.
7 2. Remove un-used function megasas_return_cmd_for_smid().
8 3. Check MFI_REG_STATE.fault.resetAdapter in megasas_reset_fusion().
9 4. Disable interrupts/free_irq() in megasas_shutdown().
10 5. Fix bug where AENs could be lost in probe() and resume().
11 6. Convert 6,10,12 byte CDB's to 16 byte CDB for large LBA's for FastPath
12 IO.
13 7. Add 1078 OCR support.
14-------------------------------------------------------------------------------
1Release Date : Thu. Feb 24, 2011 17:00:00 PST 2010 - 15Release Date : Thu. Feb 24, 2011 17:00:00 PST 2010 -
2 (emaild-id:megaraidlinux@lsi.com) 16 (emaild-id:megaraidlinux@lsi.com)
3 Adam Radford 17 Adam Radford
diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX
new file mode 100644
index 000000000000..19bc49439cac
--- /dev/null
+++ b/Documentation/security/00-INDEX
@@ -0,0 +1,18 @@
100-INDEX
2 - this file.
3SELinux.txt
4 - how to get started with the SELinux security enhancement.
5Smack.txt
6 - documentation on the Smack Linux Security Module.
7apparmor.txt
8 - documentation on the AppArmor security extension.
9credentials.txt
10 - documentation about credentials in Linux.
11keys-request-key.txt
12 - description of the kernel key request service.
13keys-trusted-encrypted.txt
14 - info on the Trusted and Encrypted keys in the kernel key ring service.
15keys.txt
16 - description of the kernel key retention service.
17tomoyo.txt
18 - documentation on the TOMOYO Linux Security Module.
diff --git a/Documentation/SELinux.txt b/Documentation/security/SELinux.txt
index 07eae00f3314..07eae00f3314 100644
--- a/Documentation/SELinux.txt
+++ b/Documentation/security/SELinux.txt
diff --git a/Documentation/Smack.txt b/Documentation/security/Smack.txt
index e9dab41c0fe0..e9dab41c0fe0 100644
--- a/Documentation/Smack.txt
+++ b/Documentation/security/Smack.txt
diff --git a/Documentation/apparmor.txt b/Documentation/security/apparmor.txt
index 93c1fd7d0635..93c1fd7d0635 100644
--- a/Documentation/apparmor.txt
+++ b/Documentation/security/apparmor.txt
diff --git a/Documentation/credentials.txt b/Documentation/security/credentials.txt
index 995baf379c07..fc0366cbd7ce 100644
--- a/Documentation/credentials.txt
+++ b/Documentation/security/credentials.txt
@@ -216,7 +216,7 @@ The Linux kernel supports the following types of credentials:
216 When a process accesses a key, if not already present, it will normally be 216 When a process accesses a key, if not already present, it will normally be
217 cached on one of these keyrings for future accesses to find. 217 cached on one of these keyrings for future accesses to find.
218 218
219 For more information on using keys, see Documentation/keys.txt. 219 For more information on using keys, see Documentation/security/keys.txt.
220 220
221 (5) LSM 221 (5) LSM
222 222
diff --git a/Documentation/keys-request-key.txt b/Documentation/security/keys-request-key.txt
index 69686ad12c66..51987bfecfed 100644
--- a/Documentation/keys-request-key.txt
+++ b/Documentation/security/keys-request-key.txt
@@ -3,8 +3,8 @@
3 =================== 3 ===================
4 4
5The key request service is part of the key retention service (refer to 5The key request service is part of the key retention service (refer to
6Documentation/keys.txt). This document explains more fully how the requesting 6Documentation/security/keys.txt). This document explains more fully how
7algorithm works. 7the requesting algorithm works.
8 8
9The process starts by either the kernel requesting a service by calling 9The process starts by either the kernel requesting a service by calling
10request_key*(): 10request_key*():
diff --git a/Documentation/keys-trusted-encrypted.txt b/Documentation/security/keys-trusted-encrypted.txt
index 8fb79bc1ac4b..8fb79bc1ac4b 100644
--- a/Documentation/keys-trusted-encrypted.txt
+++ b/Documentation/security/keys-trusted-encrypted.txt
diff --git a/Documentation/keys.txt b/Documentation/security/keys.txt
index 6523a9e6f293..4d75931d2d79 100644
--- a/Documentation/keys.txt
+++ b/Documentation/security/keys.txt
@@ -434,7 +434,7 @@ The main syscalls are:
434 /sbin/request-key will be invoked in an attempt to obtain a key. The 434 /sbin/request-key will be invoked in an attempt to obtain a key. The
435 callout_info string will be passed as an argument to the program. 435 callout_info string will be passed as an argument to the program.
436 436
437 See also Documentation/keys-request-key.txt. 437 See also Documentation/security/keys-request-key.txt.
438 438
439 439
440The keyctl syscall functions are: 440The keyctl syscall functions are:
@@ -864,7 +864,7 @@ payload contents" for more information.
864 If successful, the key will have been attached to the default keyring for 864 If successful, the key will have been attached to the default keyring for
865 implicitly obtained request-key keys, as set by KEYCTL_SET_REQKEY_KEYRING. 865 implicitly obtained request-key keys, as set by KEYCTL_SET_REQKEY_KEYRING.
866 866
867 See also Documentation/keys-request-key.txt. 867 See also Documentation/security/keys-request-key.txt.
868 868
869 869
870(*) To search for a key, passing auxiliary data to the upcaller, call: 870(*) To search for a key, passing auxiliary data to the upcaller, call:
diff --git a/Documentation/tomoyo.txt b/Documentation/security/tomoyo.txt
index 200a2d37cbc8..200a2d37cbc8 100644
--- a/Documentation/tomoyo.txt
+++ b/Documentation/security/tomoyo.txt
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 36f007514db3..5e7cb39ad195 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -161,7 +161,8 @@ core_pattern is used to specify a core dumpfile pattern name.
161 %s signal number 161 %s signal number
162 %t UNIX time of dump 162 %t UNIX time of dump
163 %h hostname 163 %h hostname
164 %e executable filename 164 %e executable filename (may be shortened)
165 %E executable path
165 %<OTHER> both are dropped 166 %<OTHER> both are dropped
166. If the first character of the pattern is a '|', the kernel will treat 167. If the first character of the pattern is a '|', the kernel will treat
167 the rest of the pattern as a command to run. The core dump will be 168 the rest of the pattern as a command to run. The core dump will be
diff --git a/MAINTAINERS b/MAINTAINERS
index 21a871c0527a..b9f5aee36375 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2304,7 +2304,7 @@ F: net/bridge/netfilter/ebt*.c
2304ECRYPT FILE SYSTEM 2304ECRYPT FILE SYSTEM
2305M: Tyler Hicks <tyhicks@linux.vnet.ibm.com> 2305M: Tyler Hicks <tyhicks@linux.vnet.ibm.com>
2306M: Dustin Kirkland <kirkland@canonical.com> 2306M: Dustin Kirkland <kirkland@canonical.com>
2307L: ecryptfs-devel@lists.launchpad.net 2307L: ecryptfs@vger.kernel.org
2308W: https://launchpad.net/ecryptfs 2308W: https://launchpad.net/ecryptfs
2309S: Supported 2309S: Supported
2310F: Documentation/filesystems/ecryptfs.txt 2310F: Documentation/filesystems/ecryptfs.txt
@@ -2584,6 +2584,13 @@ S: Maintained
2584F: drivers/hwmon/f75375s.c 2584F: drivers/hwmon/f75375s.c
2585F: include/linux/f75375s.h 2585F: include/linux/f75375s.h
2586 2586
2587FIREWIRE AUDIO DRIVERS
2588M: Clemens Ladisch <clemens@ladisch.de>
2589L: alsa-devel@alsa-project.org (moderated for non-subscribers)
2590T: git git://git.alsa-project.org/alsa-kernel.git
2591S: Maintained
2592F: sound/firewire/
2593
2587FIREWIRE SUBSYSTEM 2594FIREWIRE SUBSYSTEM
2588M: Stefan Richter <stefanr@s5r6.in-berlin.de> 2595M: Stefan Richter <stefanr@s5r6.in-berlin.de>
2589L: linux1394-devel@lists.sourceforge.net 2596L: linux1394-devel@lists.sourceforge.net
@@ -3719,7 +3726,7 @@ KEYS/KEYRINGS:
3719M: David Howells <dhowells@redhat.com> 3726M: David Howells <dhowells@redhat.com>
3720L: keyrings@linux-nfs.org 3727L: keyrings@linux-nfs.org
3721S: Maintained 3728S: Maintained
3722F: Documentation/keys.txt 3729F: Documentation/security/keys.txt
3723F: include/linux/key.h 3730F: include/linux/key.h
3724F: include/linux/key-type.h 3731F: include/linux/key-type.h
3725F: include/keys/ 3732F: include/keys/
@@ -3731,7 +3738,7 @@ M: Mimi Zohar <zohar@us.ibm.com>
3731L: linux-security-module@vger.kernel.org 3738L: linux-security-module@vger.kernel.org
3732L: keyrings@linux-nfs.org 3739L: keyrings@linux-nfs.org
3733S: Supported 3740S: Supported
3734F: Documentation/keys-trusted-encrypted.txt 3741F: Documentation/security/keys-trusted-encrypted.txt
3735F: include/keys/trusted-type.h 3742F: include/keys/trusted-type.h
3736F: security/keys/trusted.c 3743F: security/keys/trusted.c
3737F: security/keys/trusted.h 3744F: security/keys/trusted.h
@@ -3742,7 +3749,7 @@ M: David Safford <safford@watson.ibm.com>
3742L: linux-security-module@vger.kernel.org 3749L: linux-security-module@vger.kernel.org
3743L: keyrings@linux-nfs.org 3750L: keyrings@linux-nfs.org
3744S: Supported 3751S: Supported
3745F: Documentation/keys-trusted-encrypted.txt 3752F: Documentation/security/keys-trusted-encrypted.txt
3746F: include/keys/encrypted-type.h 3753F: include/keys/encrypted-type.h
3747F: security/keys/encrypted.c 3754F: security/keys/encrypted.c
3748F: security/keys/encrypted.h 3755F: security/keys/encrypted.h
@@ -4147,6 +4154,7 @@ M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
4147L: linux-mm@kvack.org 4154L: linux-mm@kvack.org
4148S: Maintained 4155S: Maintained
4149F: mm/memcontrol.c 4156F: mm/memcontrol.c
4157F: mm/page_cgroup.c
4150 4158
4151MEMORY TECHNOLOGY DEVICES (MTD) 4159MEMORY TECHNOLOGY DEVICES (MTD)
4152M: David Woodhouse <dwmw2@infradead.org> 4160M: David Woodhouse <dwmw2@infradead.org>
@@ -5507,7 +5515,7 @@ F: drivers/scsi/sg.c
5507F: include/scsi/sg.h 5515F: include/scsi/sg.h
5508 5516
5509SCSI SUBSYSTEM 5517SCSI SUBSYSTEM
5510M: "James E.J. Bottomley" <James.Bottomley@suse.de> 5518M: "James E.J. Bottomley" <JBottomley@parallels.com>
5511L: linux-scsi@vger.kernel.org 5519L: linux-scsi@vger.kernel.org
5512T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git 5520T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
5513T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git 5521T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
@@ -6000,7 +6008,7 @@ F: Documentation/filesystems/spufs.txt
6000F: arch/powerpc/platforms/cell/spufs/ 6008F: arch/powerpc/platforms/cell/spufs/
6001 6009
6002SQUASHFS FILE SYSTEM 6010SQUASHFS FILE SYSTEM
6003M: Phillip Lougher <phillip@lougher.demon.co.uk> 6011M: Phillip Lougher <phillip@squashfs.org.uk>
6004L: squashfs-devel@lists.sourceforge.net (subscribers-only) 6012L: squashfs-devel@lists.sourceforge.net (subscribers-only)
6005W: http://squashfs.org.uk 6013W: http://squashfs.org.uk
6006S: Maintained 6014S: Maintained
@@ -6076,6 +6084,17 @@ F: Documentation/filesystems/sysv-fs.txt
6076F: fs/sysv/ 6084F: fs/sysv/
6077F: include/linux/sysv_fs.h 6085F: include/linux/sysv_fs.h
6078 6086
6087TARGET SUBSYSTEM
6088M: Nicholas A. Bellinger <nab@linux-iscsi.org>
6089L: linux-scsi@vger.kernel.org
6090L: http://groups.google.com/group/linux-iscsi-target-dev
6091W: http://www.linux-iscsi.org
6092T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master
6093S: Supported
6094F: drivers/target/
6095F: include/target/
6096F: Documentation/target/
6097
6079TASKSTATS STATISTICS INTERFACE 6098TASKSTATS STATISTICS INTERFACE
6080M: Balbir Singh <balbir@linux.vnet.ibm.com> 6099M: Balbir Singh <balbir@linux.vnet.ibm.com>
6081S: Maintained 6100S: Maintained
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index e3a82775f9da..60219bf94198 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -41,10 +41,6 @@ config ARCH_HAS_ILOG2_U64
41 bool 41 bool
42 default n 42 default n
43 43
44config GENERIC_FIND_NEXT_BIT
45 bool
46 default y
47
48config GENERIC_CALIBRATE_DELAY 44config GENERIC_CALIBRATE_DELAY
49 bool 45 bool
50 default y 46 default y
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index b1834166922d..4ac48a095f3a 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -456,10 +456,11 @@
456#define __NR_open_by_handle_at 498 456#define __NR_open_by_handle_at 498
457#define __NR_clock_adjtime 499 457#define __NR_clock_adjtime 499
458#define __NR_syncfs 500 458#define __NR_syncfs 500
459#define __NR_setns 501
459 460
460#ifdef __KERNEL__ 461#ifdef __KERNEL__
461 462
462#define NR_SYSCALLS 501 463#define NR_SYSCALLS 502
463 464
464#define __ARCH_WANT_IPC_PARSE_VERSION 465#define __ARCH_WANT_IPC_PARSE_VERSION
465#define __ARCH_WANT_OLD_READDIR 466#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 15f999d41c75..b9c28f3f1956 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -519,6 +519,7 @@ sys_call_table:
519 .quad sys_open_by_handle_at 519 .quad sys_open_by_handle_at
520 .quad sys_clock_adjtime 520 .quad sys_clock_adjtime
521 .quad sys_syncfs /* 500 */ 521 .quad sys_syncfs /* 500 */
522 .quad sys_setns
522 523
523 .size sys_call_table, . - sys_call_table 524 .size sys_call_table, . - sys_call_table
524 .type sys_call_table, @object 525 .type sys_call_table, @object
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7275009686e6..9adc278a22ab 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -294,6 +294,8 @@ config ARCH_AT91
294 bool "Atmel AT91" 294 bool "Atmel AT91"
295 select ARCH_REQUIRE_GPIOLIB 295 select ARCH_REQUIRE_GPIOLIB
296 select HAVE_CLK 296 select HAVE_CLK
297 select CLKDEV_LOOKUP
298 select ARM_PATCH_PHYS_VIRT if MMU
297 help 299 help
298 This enables support for systems based on the Atmel AT91RM9200, 300 This enables support for systems based on the Atmel AT91RM9200,
299 AT91SAM9 and AT91CAP9 processors. 301 AT91SAM9 and AT91CAP9 processors.
@@ -730,16 +732,6 @@ config ARCH_S5P64X0
730 Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440, 732 Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440,
731 SMDK6450. 733 SMDK6450.
732 734
733config ARCH_S5P6442
734 bool "Samsung S5P6442"
735 select CPU_V6
736 select GENERIC_GPIO
737 select HAVE_CLK
738 select ARCH_USES_GETTIMEOFFSET
739 select HAVE_S3C2410_WATCHDOG if WATCHDOG
740 help
741 Samsung S5P6442 CPU based systems
742
743config ARCH_S5PC100 735config ARCH_S5PC100
744 bool "Samsung S5PC100" 736 bool "Samsung S5PC100"
745 select GENERIC_GPIO 737 select GENERIC_GPIO
@@ -991,8 +983,6 @@ endif
991 983
992source "arch/arm/mach-s5p64x0/Kconfig" 984source "arch/arm/mach-s5p64x0/Kconfig"
993 985
994source "arch/arm/mach-s5p6442/Kconfig"
995
996source "arch/arm/mach-s5pc100/Kconfig" 986source "arch/arm/mach-s5pc100/Kconfig"
997 987
998source "arch/arm/mach-s5pv210/Kconfig" 988source "arch/arm/mach-s5pv210/Kconfig"
@@ -1399,7 +1389,6 @@ config NR_CPUS
1399config HOTPLUG_CPU 1389config HOTPLUG_CPU
1400 bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" 1390 bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
1401 depends on SMP && HOTPLUG && EXPERIMENTAL 1391 depends on SMP && HOTPLUG && EXPERIMENTAL
1402 depends on !ARCH_MSM
1403 help 1392 help
1404 Say Y here to experiment with turning CPUs off and on. CPUs 1393 Say Y here to experiment with turning CPUs off and on. CPUs
1405 can be controlled through /sys/devices/system/cpu. 1394 can be controlled through /sys/devices/system/cpu.
@@ -1420,7 +1409,7 @@ source kernel/Kconfig.preempt
1420config HZ 1409config HZ
1421 int 1410 int
1422 default 200 if ARCH_EBSA110 || ARCH_S3C2410 || ARCH_S5P64X0 || \ 1411 default 200 if ARCH_EBSA110 || ARCH_S3C2410 || ARCH_S5P64X0 || \
1423 ARCH_S5P6442 || ARCH_S5PV210 || ARCH_EXYNOS4 1412 ARCH_S5PV210 || ARCH_EXYNOS4
1424 default OMAP_32K_TIMER_HZ if ARCH_OMAP && OMAP_32K_TIMER 1413 default OMAP_32K_TIMER_HZ if ARCH_OMAP && OMAP_32K_TIMER
1425 default AT91_TIMER_HZ if ARCH_AT91 1414 default AT91_TIMER_HZ if ARCH_AT91
1426 default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE 1415 default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
@@ -1516,6 +1505,9 @@ config ARCH_SPARSEMEM_DEFAULT
1516config ARCH_SELECT_MEMORY_MODEL 1505config ARCH_SELECT_MEMORY_MODEL
1517 def_bool ARCH_SPARSEMEM_ENABLE 1506 def_bool ARCH_SPARSEMEM_ENABLE
1518 1507
1508config HAVE_ARCH_PFN_VALID
1509 def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
1510
1519config HIGHMEM 1511config HIGHMEM
1520 bool "High Memory Support" 1512 bool "High Memory Support"
1521 depends on MMU 1513 depends on MMU
@@ -1683,6 +1675,13 @@ endmenu
1683 1675
1684menu "Boot options" 1676menu "Boot options"
1685 1677
1678config USE_OF
1679 bool "Flattened Device Tree support"
1680 select OF
1681 select OF_EARLY_FLATTREE
1682 help
1683 Include support for flattened device tree machine descriptions.
1684
1686# Compressed boot loader in ROM. Yes, we really want to ask about 1685# Compressed boot loader in ROM. Yes, we really want to ask about
1687# TEXT and BSS so we preserve their values in the config files. 1686# TEXT and BSS so we preserve their values in the config files.
1688config ZBOOT_ROM_TEXT 1687config ZBOOT_ROM_TEXT
@@ -2021,7 +2020,7 @@ menu "Power management options"
2021source "kernel/power/Kconfig" 2020source "kernel/power/Kconfig"
2022 2021
2023config ARCH_SUSPEND_POSSIBLE 2022config ARCH_SUSPEND_POSSIBLE
2024 depends on !ARCH_S5P64X0 && !ARCH_S5P6442 && !ARCH_S5PC100 2023 depends on !ARCH_S5P64X0 && !ARCH_S5PC100
2025 depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \ 2024 depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
2026 CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE 2025 CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
2027 def_bool y 2026 def_bool y
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 25750bcb3397..f5b2b390c8f2 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -176,7 +176,6 @@ machine-$(CONFIG_ARCH_S3C2410) := s3c2410 s3c2400 s3c2412 s3c2416 s3c2440 s3c24
176machine-$(CONFIG_ARCH_S3C24A0) := s3c24a0 176machine-$(CONFIG_ARCH_S3C24A0) := s3c24a0
177machine-$(CONFIG_ARCH_S3C64XX) := s3c64xx 177machine-$(CONFIG_ARCH_S3C64XX) := s3c64xx
178machine-$(CONFIG_ARCH_S5P64X0) := s5p64x0 178machine-$(CONFIG_ARCH_S5P64X0) := s5p64x0
179machine-$(CONFIG_ARCH_S5P6442) := s5p6442
180machine-$(CONFIG_ARCH_S5PC100) := s5pc100 179machine-$(CONFIG_ARCH_S5PC100) := s5pc100
181machine-$(CONFIG_ARCH_S5PV210) := s5pv210 180machine-$(CONFIG_ARCH_S5PV210) := s5pv210
182machine-$(CONFIG_ARCH_EXYNOS4) := exynos4 181machine-$(CONFIG_ARCH_EXYNOS4) := exynos4
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index ea5ee4d067f3..4b71766fb21d 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -7,7 +7,7 @@ config ARM_VIC
7config ARM_VIC_NR 7config ARM_VIC_NR
8 int 8 int
9 default 4 if ARCH_S5PV210 9 default 4 if ARCH_S5PV210
10 default 3 if ARCH_S5P6442 || ARCH_S5PC100 10 default 3 if ARCH_S5PC100
11 default 2 11 default 2
12 depends on ARM_VIC 12 depends on ARM_VIC
13 help 13 help
diff --git a/arch/arm/configs/at572d940hfek_defconfig b/arch/arm/configs/at572d940hfek_defconfig
deleted file mode 100644
index 1b1158ae8f82..000000000000
--- a/arch/arm/configs/at572d940hfek_defconfig
+++ /dev/null
@@ -1,358 +0,0 @@
1CONFIG_EXPERIMENTAL=y
2CONFIG_LOCALVERSION="-AT572D940HF"
3# CONFIG_LOCALVERSION_AUTO is not set
4CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y
6CONFIG_BSD_PROCESS_ACCT=y
7CONFIG_BSD_PROCESS_ACCT_V3=y
8CONFIG_TASKSTATS=y
9CONFIG_TASK_XACCT=y
10CONFIG_TASK_IO_ACCOUNTING=y
11CONFIG_AUDIT=y
12CONFIG_CGROUPS=y
13CONFIG_CGROUP_CPUACCT=y
14CONFIG_CGROUP_SCHED=y
15CONFIG_RT_GROUP_SCHED=y
16CONFIG_SYSFS_DEPRECATED_V2=y
17CONFIG_RELAY=y
18CONFIG_BLK_DEV_INITRD=y
19# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
20CONFIG_EXPERT=y
21CONFIG_SLAB=y
22CONFIG_PROFILING=y
23CONFIG_OPROFILE=m
24CONFIG_KPROBES=y
25CONFIG_MODULES=y
26CONFIG_MODULE_UNLOAD=y
27CONFIG_MODVERSIONS=y
28CONFIG_MODULE_SRCVERSION_ALL=y
29# CONFIG_BLK_DEV_BSG is not set
30CONFIG_ARCH_AT91=y
31CONFIG_ARCH_AT572D940HF=y
32CONFIG_MACH_AT572D940HFEB=y
33CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
34CONFIG_NO_HZ=y
35CONFIG_HIGH_RES_TIMERS=y
36CONFIG_PREEMPT=y
37CONFIG_CMDLINE="mem=48M console=ttyS0 initrd=0x21100000,3145728 root=/dev/ram0 rw ip=172.16.1.181"
38CONFIG_KEXEC=y
39CONFIG_FPE_NWFPE=y
40CONFIG_FPE_NWFPE_XP=y
41CONFIG_NET=y
42CONFIG_PACKET=m
43CONFIG_UNIX=y
44CONFIG_INET=y
45# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
46# CONFIG_INET_XFRM_MODE_TUNNEL is not set
47# CONFIG_INET_XFRM_MODE_BEET is not set
48# CONFIG_INET_LRO is not set
49# CONFIG_INET_DIAG is not set
50# CONFIG_IPV6 is not set
51CONFIG_NET_PKTGEN=m
52CONFIG_NET_TCPPROBE=m
53CONFIG_CAN=m
54CONFIG_CAN_RAW=m
55CONFIG_CAN_BCM=m
56CONFIG_CAN_VCAN=m
57CONFIG_CAN_DEBUG_DEVICES=y
58CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
59CONFIG_CONNECTOR=m
60CONFIG_MTD=m
61CONFIG_MTD_DEBUG=y
62CONFIG_MTD_DEBUG_VERBOSE=1
63CONFIG_MTD_CONCAT=m
64CONFIG_MTD_PARTITIONS=y
65CONFIG_MTD_CHAR=m
66CONFIG_MTD_BLOCK=m
67CONFIG_MTD_BLOCK_RO=m
68CONFIG_FTL=m
69CONFIG_NFTL=m
70CONFIG_NFTL_RW=y
71CONFIG_INFTL=m
72CONFIG_RFD_FTL=m
73CONFIG_SSFDC=m
74CONFIG_MTD_OOPS=m
75CONFIG_MTD_CFI=m
76CONFIG_MTD_JEDECPROBE=m
77CONFIG_MTD_CFI_INTELEXT=m
78CONFIG_MTD_CFI_AMDSTD=m
79CONFIG_MTD_CFI_STAA=m
80CONFIG_MTD_ROM=m
81CONFIG_MTD_ABSENT=m
82CONFIG_MTD_COMPLEX_MAPPINGS=y
83CONFIG_MTD_PHYSMAP=m
84CONFIG_MTD_PLATRAM=m
85CONFIG_MTD_DATAFLASH=m
86CONFIG_MTD_M25P80=m
87CONFIG_MTD_SLRAM=m
88CONFIG_MTD_PHRAM=m
89CONFIG_MTD_MTDRAM=m
90CONFIG_MTD_BLOCK2MTD=m
91CONFIG_MTD_NAND=m
92CONFIG_MTD_NAND_VERIFY_WRITE=y
93CONFIG_MTD_NAND_DISKONCHIP=m
94CONFIG_MTD_NAND_NANDSIM=m
95CONFIG_MTD_NAND_PLATFORM=m
96CONFIG_MTD_ALAUDA=m
97CONFIG_MTD_UBI=m
98CONFIG_MTD_UBI_GLUEBI=m
99CONFIG_BLK_DEV_LOOP=y
100CONFIG_BLK_DEV_CRYPTOLOOP=m
101CONFIG_BLK_DEV_NBD=m
102CONFIG_BLK_DEV_RAM=y
103CONFIG_BLK_DEV_RAM_SIZE=65536
104CONFIG_ATMEL_TCLIB=y
105CONFIG_ATMEL_SSC=m
106CONFIG_SENSORS_TSL2550=m
107CONFIG_DS1682=m
108CONFIG_RAID_ATTRS=m
109CONFIG_SCSI=m
110CONFIG_SCSI_TGT=m
111# CONFIG_SCSI_PROC_FS is not set
112CONFIG_BLK_DEV_SD=m
113CONFIG_BLK_DEV_SR=m
114CONFIG_CHR_DEV_SG=m
115CONFIG_CHR_DEV_SCH=m
116CONFIG_SCSI_MULTI_LUN=y
117CONFIG_SCSI_CONSTANTS=y
118CONFIG_SCSI_LOGGING=y
119CONFIG_SCSI_SCAN_ASYNC=y
120CONFIG_SCSI_ISCSI_ATTRS=m
121CONFIG_NETDEVICES=y
122CONFIG_DUMMY=m
123CONFIG_BONDING=m
124CONFIG_MACVLAN=m
125CONFIG_EQUALIZER=m
126CONFIG_TUN=m
127CONFIG_VETH=m
128CONFIG_PHYLIB=y
129CONFIG_MARVELL_PHY=m
130CONFIG_DAVICOM_PHY=m
131CONFIG_QSEMI_PHY=m
132CONFIG_LXT_PHY=m
133CONFIG_CICADA_PHY=m
134CONFIG_VITESSE_PHY=m
135CONFIG_SMSC_PHY=m
136CONFIG_BROADCOM_PHY=m
137CONFIG_ICPLUS_PHY=m
138CONFIG_MDIO_BITBANG=m
139CONFIG_NET_ETHERNET=y
140# CONFIG_NETDEV_1000 is not set
141# CONFIG_NETDEV_10000 is not set
142CONFIG_USB_ZD1201=m
143CONFIG_HOSTAP=m
144CONFIG_HOSTAP_FIRMWARE=y
145CONFIG_HOSTAP_FIRMWARE_NVRAM=y
146CONFIG_USB_CATC=m
147CONFIG_USB_KAWETH=m
148CONFIG_USB_PEGASUS=m
149CONFIG_USB_RTL8150=m
150CONFIG_USB_USBNET=m
151CONFIG_USB_NET_DM9601=m
152CONFIG_USB_NET_GL620A=m
153CONFIG_USB_NET_PLUSB=m
154CONFIG_USB_NET_MCS7830=m
155CONFIG_USB_NET_RNDIS_HOST=m
156CONFIG_USB_ALI_M5632=y
157CONFIG_USB_AN2720=y
158CONFIG_USB_EPSON2888=y
159CONFIG_USB_KC2190=y
160# CONFIG_USB_NET_ZAURUS is not set
161CONFIG_INPUT_MOUSEDEV=m
162CONFIG_INPUT_EVDEV=m
163CONFIG_INPUT_EVBUG=m
164CONFIG_KEYBOARD_LKKBD=m
165CONFIG_KEYBOARD_GPIO=m
166CONFIG_KEYBOARD_NEWTON=m
167CONFIG_KEYBOARD_STOWAWAY=m
168CONFIG_KEYBOARD_SUNKBD=m
169CONFIG_KEYBOARD_XTKBD=m
170CONFIG_MOUSE_PS2=m
171CONFIG_MOUSE_SERIAL=m
172CONFIG_MOUSE_APPLETOUCH=m
173CONFIG_MOUSE_VSXXXAA=m
174CONFIG_MOUSE_GPIO=m
175CONFIG_INPUT_MISC=y
176CONFIG_INPUT_UINPUT=m
177CONFIG_SERIO_SERPORT=m
178CONFIG_SERIO_RAW=m
179CONFIG_VT_HW_CONSOLE_BINDING=y
180CONFIG_SERIAL_NONSTANDARD=y
181CONFIG_N_HDLC=m
182CONFIG_SPECIALIX=m
183CONFIG_STALDRV=y
184CONFIG_SERIAL_ATMEL=y
185CONFIG_SERIAL_ATMEL_CONSOLE=y
186CONFIG_IPMI_HANDLER=m
187CONFIG_IPMI_DEVICE_INTERFACE=m
188CONFIG_IPMI_SI=m
189CONFIG_IPMI_WATCHDOG=m
190CONFIG_IPMI_POWEROFF=m
191CONFIG_HW_RANDOM=y
192CONFIG_R3964=m
193CONFIG_RAW_DRIVER=m
194CONFIG_TCG_TPM=m
195CONFIG_TCG_NSC=m
196CONFIG_TCG_ATMEL=m
197CONFIG_I2C=m
198CONFIG_I2C_CHARDEV=m
199CONFIG_SPI=y
200CONFIG_SPI_ATMEL=y
201CONFIG_SPI_BITBANG=m
202CONFIG_SPI_SPIDEV=m
203# CONFIG_HWMON is not set
204# CONFIG_VGA_CONSOLE is not set
205CONFIG_SOUND=m
206CONFIG_SND=m
207CONFIG_SND_SEQUENCER=m
208CONFIG_SND_SEQ_DUMMY=m
209CONFIG_SND_MIXER_OSS=m
210CONFIG_SND_PCM_OSS=m
211# CONFIG_SND_PCM_OSS_PLUGINS is not set
212CONFIG_SND_SEQUENCER_OSS=y
213CONFIG_SND_DYNAMIC_MINORS=y
214# CONFIG_SND_VERBOSE_PROCFS is not set
215CONFIG_SND_DUMMY=m
216CONFIG_SND_VIRMIDI=m
217CONFIG_SND_USB_AUDIO=m
218CONFIG_SND_USB_CAIAQ=m
219CONFIG_SND_USB_CAIAQ_INPUT=y
220CONFIG_HID=m
221CONFIG_HIDRAW=y
222CONFIG_USB_HID=m
223CONFIG_USB_HIDDEV=y
224CONFIG_USB_KBD=m
225CONFIG_USB_MOUSE=m
226CONFIG_HID_A4TECH=m
227CONFIG_HID_APPLE=m
228CONFIG_HID_BELKIN=m
229CONFIG_HID_CHERRY=m
230CONFIG_HID_CHICONY=m
231CONFIG_HID_CYPRESS=m
232CONFIG_HID_EZKEY=m
233CONFIG_HID_GYRATION=m
234CONFIG_HID_LOGITECH=m
235CONFIG_HID_MICROSOFT=m
236CONFIG_HID_MONTEREY=m
237CONFIG_HID_PANTHERLORD=m
238CONFIG_HID_PETALYNX=m
239CONFIG_HID_SAMSUNG=m
240CONFIG_HID_SONY=m
241CONFIG_HID_SUNPLUS=m
242CONFIG_USB=y
243CONFIG_USB_DEVICEFS=y
244# CONFIG_USB_DEVICE_CLASS is not set
245CONFIG_USB_DYNAMIC_MINORS=y
246CONFIG_USB_MON=y
247CONFIG_USB_OHCI_HCD=y
248CONFIG_USB_STORAGE=m
249CONFIG_USB_STORAGE_DATAFAB=m
250CONFIG_USB_STORAGE_FREECOM=m
251CONFIG_USB_STORAGE_ISD200=m
252CONFIG_USB_STORAGE_USBAT=m
253CONFIG_USB_STORAGE_SDDR09=m
254CONFIG_USB_STORAGE_SDDR55=m
255CONFIG_USB_STORAGE_JUMPSHOT=m
256CONFIG_USB_STORAGE_ALAUDA=m
257CONFIG_USB_STORAGE_KARMA=m
258CONFIG_USB_LIBUSUAL=y
259CONFIG_USB_SERIAL=m
260CONFIG_USB_EZUSB=y
261CONFIG_USB_SERIAL_GENERIC=y
262CONFIG_USB_SERIAL_PL2303=m
263CONFIG_USB_SERIAL_SPCP8X5=m
264CONFIG_USB_SERIAL_DEBUG=m
265CONFIG_USB_EMI62=m
266CONFIG_USB_EMI26=m
267CONFIG_USB_ADUTUX=m
268CONFIG_USB_TEST=m
269CONFIG_USB_GADGET=m
270CONFIG_USB_GADGET_DEBUG_FILES=y
271CONFIG_USB_GADGET_DEBUG_FS=y
272CONFIG_USB_ZERO=m
273CONFIG_USB_ETH=m
274CONFIG_USB_GADGETFS=m
275CONFIG_USB_FILE_STORAGE=m
276CONFIG_USB_G_SERIAL=m
277CONFIG_USB_MIDI_GADGET=m
278CONFIG_MMC=y
279CONFIG_SDIO_UART=m
280CONFIG_MMC_AT91=m
281CONFIG_MMC_SPI=m
282CONFIG_NEW_LEDS=y
283CONFIG_LEDS_CLASS=m
284CONFIG_LEDS_GPIO=m
285CONFIG_LEDS_TRIGGERS=y
286CONFIG_LEDS_TRIGGER_TIMER=m
287CONFIG_LEDS_TRIGGER_HEARTBEAT=m
288CONFIG_RTC_CLASS=y
289CONFIG_RTC_INTF_DEV_UIE_EMUL=y
290CONFIG_RTC_DRV_DS1307=m
291CONFIG_RTC_DRV_DS1305=y
292CONFIG_EXT2_FS=y
293CONFIG_EXT2_FS_XATTR=y
294CONFIG_EXT2_FS_POSIX_ACL=y
295CONFIG_EXT2_FS_SECURITY=y
296CONFIG_EXT3_FS=y
297CONFIG_EXT3_FS_POSIX_ACL=y
298CONFIG_EXT3_FS_SECURITY=y
299CONFIG_JBD_DEBUG=y
300CONFIG_REISERFS_FS=m
301CONFIG_REISERFS_CHECK=y
302CONFIG_REISERFS_PROC_INFO=y
303CONFIG_REISERFS_FS_XATTR=y
304CONFIG_REISERFS_FS_POSIX_ACL=y
305CONFIG_REISERFS_FS_SECURITY=y
306CONFIG_INOTIFY=y
307CONFIG_FUSE_FS=m
308CONFIG_MSDOS_FS=m
309CONFIG_VFAT_FS=y
310CONFIG_NTFS_FS=m
311CONFIG_NTFS_RW=y
312CONFIG_TMPFS=y
313CONFIG_TMPFS_POSIX_ACL=y
314CONFIG_JFFS2_FS=m
315CONFIG_JFFS2_COMPRESSION_OPTIONS=y
316CONFIG_JFFS2_LZO=y
317CONFIG_JFFS2_CMODE_FAVOURLZO=y
318CONFIG_CRAMFS=m
319CONFIG_NFS_FS=m
320CONFIG_NFS_V3=y
321CONFIG_NFS_V3_ACL=y
322CONFIG_NFS_V4=y
323CONFIG_NFSD=m
324CONFIG_NFSD_V3_ACL=y
325CONFIG_NFSD_V4=y
326CONFIG_CIFS=m
327CONFIG_CIFS_WEAK_PW_HASH=y
328CONFIG_PARTITION_ADVANCED=y
329CONFIG_MAC_PARTITION=y
330CONFIG_BSD_DISKLABEL=y
331CONFIG_MINIX_SUBPARTITION=y
332CONFIG_SOLARIS_X86_PARTITION=y
333CONFIG_UNIXWARE_DISKLABEL=y
334CONFIG_LDM_PARTITION=y
335CONFIG_LDM_DEBUG=y
336CONFIG_SGI_PARTITION=y
337CONFIG_SUN_PARTITION=y
338CONFIG_NLS_DEFAULT="cp437"
339CONFIG_NLS_CODEPAGE_437=y
340CONFIG_NLS_CODEPAGE_850=m
341CONFIG_NLS_ASCII=y
342CONFIG_NLS_ISO8859_1=y
343CONFIG_NLS_UTF8=m
344CONFIG_DLM=m
345CONFIG_PRINTK_TIME=y
346CONFIG_MAGIC_SYSRQ=y
347CONFIG_UNUSED_SYMBOLS=y
348CONFIG_DEBUG_FS=y
349# CONFIG_RCU_CPU_STALL_DETECTOR is not set
350CONFIG_SYSCTL_SYSCALL_CHECK=y
351CONFIG_CRYPTO=y
352CONFIG_CRYPTO_GF128MUL=m
353CONFIG_CRYPTO_HMAC=y
354CONFIG_CRYPTO_MD5=y
355# CONFIG_CRYPTO_ANSI_CPRNG is not set
356# CONFIG_CRYPTO_HW is not set
357CONFIG_CRC_CCITT=m
358CONFIG_CRC16=m
diff --git a/arch/arm/configs/at91sam9261ek_defconfig b/arch/arm/configs/at91sam9261_defconfig
index b46025b66b64..ade6b2f23116 100644
--- a/arch/arm/configs/at91sam9261ek_defconfig
+++ b/arch/arm/configs/at91sam9261_defconfig
@@ -1,9 +1,13 @@
1CONFIG_EXPERIMENTAL=y 1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_KERNEL_LZMA=y
3# CONFIG_SWAP is not set 4# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 5CONFIG_SYSVIPC=y
6CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y
5CONFIG_LOG_BUF_SHIFT=14 8CONFIG_LOG_BUF_SHIFT=14
6CONFIG_BLK_DEV_INITRD=y 9CONFIG_NAMESPACES=y
10CONFIG_EMBEDDED=y
7CONFIG_SLAB=y 11CONFIG_SLAB=y
8CONFIG_MODULES=y 12CONFIG_MODULES=y
9CONFIG_MODULE_UNLOAD=y 13CONFIG_MODULE_UNLOAD=y
@@ -15,18 +19,27 @@ CONFIG_ARCH_AT91SAM9261=y
15CONFIG_MACH_AT91SAM9261EK=y 19CONFIG_MACH_AT91SAM9261EK=y
16CONFIG_AT91_PROGRAMMABLE_CLOCKS=y 20CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
17# CONFIG_ARM_THUMB is not set 21# CONFIG_ARM_THUMB is not set
22CONFIG_AEABI=y
23# CONFIG_OABI_COMPAT is not set
18CONFIG_ZBOOT_ROM_TEXT=0x0 24CONFIG_ZBOOT_ROM_TEXT=0x0
19CONFIG_ZBOOT_ROM_BSS=0x0 25CONFIG_ZBOOT_ROM_BSS=0x0
20CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw" 26CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
21CONFIG_FPE_NWFPE=y 27CONFIG_AUTO_ZRELADDR=y
28CONFIG_VFP=y
29# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
22CONFIG_NET=y 30CONFIG_NET=y
23CONFIG_PACKET=y 31CONFIG_PACKET=y
24CONFIG_UNIX=y 32CONFIG_UNIX=y
25CONFIG_INET=y 33CONFIG_INET=y
34CONFIG_IP_MULTICAST=y
26CONFIG_IP_PNP=y 35CONFIG_IP_PNP=y
36CONFIG_IP_PNP_DHCP=y
27CONFIG_IP_PNP_BOOTP=y 37CONFIG_IP_PNP_BOOTP=y
28# CONFIG_INET_LRO is not set 38# CONFIG_INET_LRO is not set
29# CONFIG_IPV6 is not set 39# CONFIG_IPV6 is not set
40CONFIG_CFG80211=y
41CONFIG_LIB80211=y
42CONFIG_MAC80211=y
30CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
31CONFIG_MTD=y 44CONFIG_MTD=y
32CONFIG_MTD_PARTITIONS=y 45CONFIG_MTD_PARTITIONS=y
@@ -34,8 +47,12 @@ CONFIG_MTD_CMDLINE_PARTS=y
34CONFIG_MTD_BLOCK=y 47CONFIG_MTD_BLOCK=y
35CONFIG_MTD_NAND=y 48CONFIG_MTD_NAND=y
36CONFIG_MTD_NAND_ATMEL=y 49CONFIG_MTD_NAND_ATMEL=y
50CONFIG_MTD_UBI=y
51CONFIG_MTD_UBI_GLUEBI=y
37CONFIG_BLK_DEV_RAM=y 52CONFIG_BLK_DEV_RAM=y
38CONFIG_BLK_DEV_RAM_SIZE=8192 53CONFIG_BLK_DEV_RAM_SIZE=8192
54CONFIG_MISC_DEVICES=y
55CONFIG_ATMEL_TCLIB=y
39CONFIG_ATMEL_SSC=y 56CONFIG_ATMEL_SSC=y
40CONFIG_SCSI=y 57CONFIG_SCSI=y
41CONFIG_BLK_DEV_SD=y 58CONFIG_BLK_DEV_SD=y
@@ -45,12 +62,27 @@ CONFIG_NET_ETHERNET=y
45CONFIG_DM9000=y 62CONFIG_DM9000=y
46# CONFIG_NETDEV_1000 is not set 63# CONFIG_NETDEV_1000 is not set
47# CONFIG_NETDEV_10000 is not set 64# CONFIG_NETDEV_10000 is not set
65CONFIG_USB_ZD1201=m
66CONFIG_RTL8187=m
67CONFIG_LIBERTAS=m
68CONFIG_LIBERTAS_USB=m
69CONFIG_LIBERTAS_SDIO=m
70CONFIG_LIBERTAS_SPI=m
71CONFIG_RT2X00=m
72CONFIG_RT2500USB=m
73CONFIG_RT73USB=m
74CONFIG_ZD1211RW=m
75CONFIG_INPUT_POLLDEV=m
48# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 76# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
77CONFIG_INPUT_MOUSEDEV_SCREEN_X=240
78CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
79CONFIG_INPUT_EVDEV=y
49# CONFIG_KEYBOARD_ATKBD is not set 80# CONFIG_KEYBOARD_ATKBD is not set
50CONFIG_KEYBOARD_GPIO=y 81CONFIG_KEYBOARD_GPIO=y
51# CONFIG_INPUT_MOUSE is not set 82# CONFIG_INPUT_MOUSE is not set
52CONFIG_INPUT_TOUCHSCREEN=y 83CONFIG_INPUT_TOUCHSCREEN=y
53CONFIG_TOUCHSCREEN_ADS7846=y 84CONFIG_TOUCHSCREEN_ADS7846=y
85CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
54CONFIG_SERIAL_ATMEL=y 86CONFIG_SERIAL_ATMEL=y
55CONFIG_SERIAL_ATMEL_CONSOLE=y 87CONFIG_SERIAL_ATMEL_CONSOLE=y
56CONFIG_HW_RANDOM=y 88CONFIG_HW_RANDOM=y
@@ -65,31 +97,62 @@ CONFIG_WATCHDOG_NOWAYOUT=y
65CONFIG_AT91SAM9X_WATCHDOG=y 97CONFIG_AT91SAM9X_WATCHDOG=y
66CONFIG_FB=y 98CONFIG_FB=y
67CONFIG_FB_ATMEL=y 99CONFIG_FB_ATMEL=y
68# CONFIG_VGA_CONSOLE is not set 100CONFIG_BACKLIGHT_LCD_SUPPORT=y
101# CONFIG_LCD_CLASS_DEVICE is not set
102CONFIG_BACKLIGHT_CLASS_DEVICE=y
103CONFIG_BACKLIGHT_ATMEL_LCDC=y
104# CONFIG_BACKLIGHT_GENERIC is not set
105CONFIG_FRAMEBUFFER_CONSOLE=y
106CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
107CONFIG_LOGO=y
108CONFIG_SOUND=y
109CONFIG_SND=y
110CONFIG_SND_SEQUENCER=y
111CONFIG_SND_MIXER_OSS=y
112CONFIG_SND_PCM_OSS=y
113# CONFIG_SND_SUPPORT_OLD_API is not set
114# CONFIG_SND_VERBOSE_PROCFS is not set
115# CONFIG_SND_DRIVERS is not set
116# CONFIG_SND_ARM is not set
117CONFIG_SND_AT73C213=y
118CONFIG_SND_USB_AUDIO=m
69# CONFIG_USB_HID is not set 119# CONFIG_USB_HID is not set
70CONFIG_USB=y 120CONFIG_USB=y
71CONFIG_USB_DEVICEFS=y 121CONFIG_USB_DEVICEFS=y
72CONFIG_USB_MON=y
73CONFIG_USB_OHCI_HCD=y 122CONFIG_USB_OHCI_HCD=y
74CONFIG_USB_STORAGE=y 123CONFIG_USB_STORAGE=y
75CONFIG_USB_STORAGE_DEBUG=y
76CONFIG_USB_GADGET=y 124CONFIG_USB_GADGET=y
77CONFIG_USB_ZERO=m 125CONFIG_USB_ZERO=m
126CONFIG_USB_ETH=m
78CONFIG_USB_GADGETFS=m 127CONFIG_USB_GADGETFS=m
79CONFIG_USB_FILE_STORAGE=m 128CONFIG_USB_FILE_STORAGE=m
80CONFIG_USB_G_SERIAL=m 129CONFIG_USB_G_SERIAL=m
81CONFIG_MMC=y 130CONFIG_MMC=y
82CONFIG_MMC_AT91=m 131CONFIG_MMC_AT91=m
132CONFIG_NEW_LEDS=y
133CONFIG_LEDS_CLASS=y
134CONFIG_LEDS_GPIO=y
135CONFIG_LEDS_TRIGGERS=y
136CONFIG_LEDS_TRIGGER_TIMER=y
137CONFIG_LEDS_TRIGGER_HEARTBEAT=y
138CONFIG_LEDS_TRIGGER_GPIO=y
83CONFIG_RTC_CLASS=y 139CONFIG_RTC_CLASS=y
84CONFIG_RTC_DRV_AT91SAM9=y 140CONFIG_RTC_DRV_AT91SAM9=y
85CONFIG_EXT2_FS=y 141CONFIG_MSDOS_FS=y
86CONFIG_INOTIFY=y
87CONFIG_VFAT_FS=y 142CONFIG_VFAT_FS=y
88CONFIG_TMPFS=y 143CONFIG_TMPFS=y
89CONFIG_CRAMFS=y 144CONFIG_UBIFS_FS=y
145CONFIG_UBIFS_FS_ADVANCED_COMPR=y
146CONFIG_SQUASHFS=y
147CONFIG_SQUASHFS_LZO=y
148CONFIG_SQUASHFS_XZ=y
149CONFIG_NFS_FS=y
150CONFIG_NFS_V3=y
151CONFIG_ROOT_NFS=y
90CONFIG_NLS_CODEPAGE_437=y 152CONFIG_NLS_CODEPAGE_437=y
91CONFIG_NLS_CODEPAGE_850=y 153CONFIG_NLS_CODEPAGE_850=y
92CONFIG_NLS_ISO8859_1=y 154CONFIG_NLS_ISO8859_1=y
93CONFIG_DEBUG_KERNEL=y 155CONFIG_NLS_ISO8859_15=y
94CONFIG_DEBUG_USER=y 156CONFIG_NLS_UTF8=y
95CONFIG_DEBUG_LL=y 157CONFIG_FTRACE=y
158CONFIG_CRC_CCITT=m
diff --git a/arch/arm/configs/at91sam9263ek_defconfig b/arch/arm/configs/at91sam9263_defconfig
index 8a04d6f4e065..1cf96264cba1 100644
--- a/arch/arm/configs/at91sam9263ek_defconfig
+++ b/arch/arm/configs/at91sam9263_defconfig
@@ -1,9 +1,13 @@
1CONFIG_EXPERIMENTAL=y 1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_KERNEL_LZMA=y
3# CONFIG_SWAP is not set 4# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 5CONFIG_SYSVIPC=y
6CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y
5CONFIG_LOG_BUF_SHIFT=14 8CONFIG_LOG_BUF_SHIFT=14
6CONFIG_BLK_DEV_INITRD=y 9CONFIG_NAMESPACES=y
10CONFIG_EMBEDDED=y
7CONFIG_SLAB=y 11CONFIG_SLAB=y
8CONFIG_MODULES=y 12CONFIG_MODULES=y
9CONFIG_MODULE_UNLOAD=y 13CONFIG_MODULE_UNLOAD=y
@@ -13,53 +17,81 @@ CONFIG_MODULE_UNLOAD=y
13CONFIG_ARCH_AT91=y 17CONFIG_ARCH_AT91=y
14CONFIG_ARCH_AT91SAM9263=y 18CONFIG_ARCH_AT91SAM9263=y
15CONFIG_MACH_AT91SAM9263EK=y 19CONFIG_MACH_AT91SAM9263EK=y
20CONFIG_MACH_USB_A9263=y
21CONFIG_MACH_NEOCORE926=y
16CONFIG_MTD_AT91_DATAFLASH_CARD=y 22CONFIG_MTD_AT91_DATAFLASH_CARD=y
17# CONFIG_ARM_THUMB is not set 23# CONFIG_ARM_THUMB is not set
24CONFIG_AEABI=y
25# CONFIG_OABI_COMPAT is not set
18CONFIG_ZBOOT_ROM_TEXT=0x0 26CONFIG_ZBOOT_ROM_TEXT=0x0
19CONFIG_ZBOOT_ROM_BSS=0x0 27CONFIG_ZBOOT_ROM_BSS=0x0
20CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw" 28CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
21CONFIG_FPE_NWFPE=y 29CONFIG_AUTO_ZRELADDR=y
22CONFIG_NET=y 30CONFIG_NET=y
23CONFIG_PACKET=y 31CONFIG_PACKET=y
24CONFIG_UNIX=y 32CONFIG_UNIX=y
33CONFIG_NET_KEY=y
25CONFIG_INET=y 34CONFIG_INET=y
35CONFIG_IP_MULTICAST=y
36CONFIG_IP_ADVANCED_ROUTER=y
37CONFIG_IP_ROUTE_VERBOSE=y
26CONFIG_IP_PNP=y 38CONFIG_IP_PNP=y
39CONFIG_IP_PNP_DHCP=y
27CONFIG_IP_PNP_BOOTP=y 40CONFIG_IP_PNP_BOOTP=y
28CONFIG_IP_PNP_RARP=y 41CONFIG_IP_PNP_RARP=y
42CONFIG_NET_IPIP=y
43CONFIG_IP_MROUTE=y
44CONFIG_IP_PIMSM_V1=y
45CONFIG_IP_PIMSM_V2=y
29# CONFIG_INET_XFRM_MODE_TRANSPORT is not set 46# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
30# CONFIG_INET_XFRM_MODE_TUNNEL is not set 47# CONFIG_INET_XFRM_MODE_TUNNEL is not set
31# CONFIG_INET_XFRM_MODE_BEET is not set 48# CONFIG_INET_XFRM_MODE_BEET is not set
32# CONFIG_INET_LRO is not set 49# CONFIG_INET_LRO is not set
33# CONFIG_INET_DIAG is not set 50# CONFIG_INET_DIAG is not set
34# CONFIG_IPV6 is not set 51CONFIG_IPV6=y
35CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 52CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
36CONFIG_MTD=y 53CONFIG_MTD=y
37CONFIG_MTD_PARTITIONS=y 54CONFIG_MTD_PARTITIONS=y
38CONFIG_MTD_CMDLINE_PARTS=y 55CONFIG_MTD_CMDLINE_PARTS=y
39CONFIG_MTD_CHAR=y 56CONFIG_MTD_CHAR=y
40CONFIG_MTD_BLOCK=y 57CONFIG_MTD_BLOCK=y
58CONFIG_NFTL=y
59CONFIG_NFTL_RW=y
41CONFIG_MTD_DATAFLASH=y 60CONFIG_MTD_DATAFLASH=y
61CONFIG_MTD_BLOCK2MTD=y
42CONFIG_MTD_NAND=y 62CONFIG_MTD_NAND=y
43CONFIG_MTD_NAND_ATMEL=y 63CONFIG_MTD_NAND_ATMEL=y
64CONFIG_MTD_NAND_ATMEL_ECC_SOFT=y
65CONFIG_MTD_UBI=y
66CONFIG_MTD_UBI_GLUEBI=y
44CONFIG_BLK_DEV_LOOP=y 67CONFIG_BLK_DEV_LOOP=y
45CONFIG_BLK_DEV_RAM=y 68CONFIG_BLK_DEV_RAM=y
46CONFIG_BLK_DEV_RAM_SIZE=8192 69CONFIG_BLK_DEV_RAM_SIZE=8192
47CONFIG_ATMEL_SSC=y 70CONFIG_MISC_DEVICES=y
71CONFIG_ATMEL_PWM=y
72CONFIG_ATMEL_TCLIB=y
48CONFIG_SCSI=y 73CONFIG_SCSI=y
49CONFIG_BLK_DEV_SD=y 74CONFIG_BLK_DEV_SD=y
50CONFIG_SCSI_MULTI_LUN=y 75CONFIG_SCSI_MULTI_LUN=y
51CONFIG_NETDEVICES=y 76CONFIG_NETDEVICES=y
52CONFIG_NET_ETHERNET=y
53CONFIG_MII=y 77CONFIG_MII=y
78CONFIG_SMSC_PHY=y
79CONFIG_NET_ETHERNET=y
54CONFIG_MACB=y 80CONFIG_MACB=y
81# CONFIG_NETDEV_1000 is not set
82# CONFIG_NETDEV_10000 is not set
83CONFIG_USB_ZD1201=m
84CONFIG_INPUT_POLLDEV=m
55# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 85# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
86CONFIG_INPUT_MOUSEDEV_SCREEN_X=240
87CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
56CONFIG_INPUT_EVDEV=y 88CONFIG_INPUT_EVDEV=y
57# CONFIG_KEYBOARD_ATKBD is not set 89# CONFIG_KEYBOARD_ATKBD is not set
58CONFIG_KEYBOARD_GPIO=y 90CONFIG_KEYBOARD_GPIO=y
59# CONFIG_INPUT_MOUSE is not set 91# CONFIG_INPUT_MOUSE is not set
60CONFIG_INPUT_TOUCHSCREEN=y 92CONFIG_INPUT_TOUCHSCREEN=y
61CONFIG_TOUCHSCREEN_ADS7846=y 93CONFIG_TOUCHSCREEN_ADS7846=y
62# CONFIG_SERIO is not set 94CONFIG_LEGACY_PTY_COUNT=4
63CONFIG_SERIAL_ATMEL=y 95CONFIG_SERIAL_ATMEL=y
64CONFIG_SERIAL_ATMEL_CONSOLE=y 96CONFIG_SERIAL_ATMEL_CONSOLE=y
65CONFIG_HW_RANDOM=y 97CONFIG_HW_RANDOM=y
@@ -74,8 +106,25 @@ CONFIG_WATCHDOG_NOWAYOUT=y
74CONFIG_AT91SAM9X_WATCHDOG=y 106CONFIG_AT91SAM9X_WATCHDOG=y
75CONFIG_FB=y 107CONFIG_FB=y
76CONFIG_FB_ATMEL=y 108CONFIG_FB_ATMEL=y
77# CONFIG_VGA_CONSOLE is not set 109CONFIG_BACKLIGHT_LCD_SUPPORT=y
78# CONFIG_USB_HID is not set 110CONFIG_LCD_CLASS_DEVICE=y
111CONFIG_BACKLIGHT_CLASS_DEVICE=y
112CONFIG_BACKLIGHT_ATMEL_LCDC=y
113CONFIG_FRAMEBUFFER_CONSOLE=y
114CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
115CONFIG_LOGO=y
116CONFIG_SOUND=y
117CONFIG_SND=y
118CONFIG_SND_SEQUENCER=y
119CONFIG_SND_MIXER_OSS=y
120CONFIG_SND_PCM_OSS=y
121# CONFIG_SND_SUPPORT_OLD_API is not set
122# CONFIG_SND_VERBOSE_PROCFS is not set
123# CONFIG_SND_DRIVERS is not set
124# CONFIG_SND_ARM is not set
125CONFIG_SND_ATMEL_AC97C=y
126# CONFIG_SND_SPI is not set
127CONFIG_SND_USB_AUDIO=m
79CONFIG_USB=y 128CONFIG_USB=y
80CONFIG_USB_DEVICEFS=y 129CONFIG_USB_DEVICEFS=y
81CONFIG_USB_MON=y 130CONFIG_USB_MON=y
@@ -83,24 +132,37 @@ CONFIG_USB_OHCI_HCD=y
83CONFIG_USB_STORAGE=y 132CONFIG_USB_STORAGE=y
84CONFIG_USB_GADGET=y 133CONFIG_USB_GADGET=y
85CONFIG_USB_ZERO=m 134CONFIG_USB_ZERO=m
135CONFIG_USB_ETH=m
86CONFIG_USB_GADGETFS=m 136CONFIG_USB_GADGETFS=m
87CONFIG_USB_FILE_STORAGE=m 137CONFIG_USB_FILE_STORAGE=m
88CONFIG_USB_G_SERIAL=m 138CONFIG_USB_G_SERIAL=m
89CONFIG_MMC=y 139CONFIG_MMC=y
140CONFIG_SDIO_UART=m
90CONFIG_MMC_AT91=m 141CONFIG_MMC_AT91=m
142CONFIG_NEW_LEDS=y
143CONFIG_LEDS_CLASS=y
144CONFIG_LEDS_ATMEL_PWM=y
145CONFIG_LEDS_GPIO=y
146CONFIG_LEDS_TRIGGERS=y
147CONFIG_LEDS_TRIGGER_HEARTBEAT=y
91CONFIG_RTC_CLASS=y 148CONFIG_RTC_CLASS=y
92CONFIG_RTC_DRV_AT91SAM9=y 149CONFIG_RTC_DRV_AT91SAM9=y
93CONFIG_EXT2_FS=y 150CONFIG_EXT2_FS=y
94CONFIG_INOTIFY=y 151CONFIG_FUSE_FS=m
95CONFIG_VFAT_FS=y 152CONFIG_VFAT_FS=y
96CONFIG_TMPFS=y 153CONFIG_TMPFS=y
97CONFIG_JFFS2_FS=y 154CONFIG_JFFS2_FS=y
155CONFIG_UBIFS_FS=y
156CONFIG_UBIFS_FS_ADVANCED_COMPR=y
98CONFIG_CRAMFS=y 157CONFIG_CRAMFS=y
99CONFIG_NFS_FS=y 158CONFIG_NFS_FS=y
159CONFIG_NFS_V3=y
160CONFIG_NFS_V3_ACL=y
161CONFIG_NFS_V4=y
100CONFIG_ROOT_NFS=y 162CONFIG_ROOT_NFS=y
101CONFIG_NLS_CODEPAGE_437=y 163CONFIG_NLS_CODEPAGE_437=y
102CONFIG_NLS_CODEPAGE_850=y 164CONFIG_NLS_CODEPAGE_850=y
103CONFIG_NLS_ISO8859_1=y 165CONFIG_NLS_ISO8859_1=y
104CONFIG_DEBUG_KERNEL=y 166CONFIG_FTRACE=y
105CONFIG_DEBUG_USER=y 167CONFIG_DEBUG_USER=y
106CONFIG_DEBUG_LL=y 168CONFIG_XZ_DEC=y
diff --git a/arch/arm/configs/exynos4_defconfig b/arch/arm/configs/exynos4_defconfig
index 2ffba24d2e2a..da53ff3b4d70 100644
--- a/arch/arm/configs/exynos4_defconfig
+++ b/arch/arm/configs/exynos4_defconfig
@@ -8,7 +8,9 @@ CONFIG_ARCH_EXYNOS4=y
8CONFIG_S3C_LOWLEVEL_UART_PORT=1 8CONFIG_S3C_LOWLEVEL_UART_PORT=1
9CONFIG_MACH_SMDKC210=y 9CONFIG_MACH_SMDKC210=y
10CONFIG_MACH_SMDKV310=y 10CONFIG_MACH_SMDKV310=y
11CONFIG_MACH_ARMLEX4210=y
11CONFIG_MACH_UNIVERSAL_C210=y 12CONFIG_MACH_UNIVERSAL_C210=y
13CONFIG_MACH_NURI=y
12CONFIG_NO_HZ=y 14CONFIG_NO_HZ=y
13CONFIG_HIGH_RES_TIMERS=y 15CONFIG_HIGH_RES_TIMERS=y
14CONFIG_SMP=y 16CONFIG_SMP=y
diff --git a/arch/arm/configs/neocore926_defconfig b/arch/arm/configs/neocore926_defconfig
deleted file mode 100644
index 462dd1850d15..000000000000
--- a/arch/arm/configs/neocore926_defconfig
+++ /dev/null
@@ -1,104 +0,0 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set
3# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y
5CONFIG_BLK_DEV_INITRD=y
6# CONFIG_COMPAT_BRK is not set
7CONFIG_MODULES=y
8CONFIG_MODULE_UNLOAD=y
9# CONFIG_BLK_DEV_BSG is not set
10# CONFIG_IOSCHED_DEADLINE is not set
11# CONFIG_IOSCHED_CFQ is not set
12CONFIG_ARCH_AT91=y
13CONFIG_ARCH_AT91SAM9263=y
14CONFIG_MACH_NEOCORE926=y
15CONFIG_MTD_AT91_DATAFLASH_CARD=y
16CONFIG_ZBOOT_ROM_TEXT=0x0
17CONFIG_ZBOOT_ROM_BSS=0x0
18CONFIG_FPE_NWFPE=y
19CONFIG_NET=y
20CONFIG_PACKET=y
21CONFIG_UNIX=y
22CONFIG_NET_KEY=y
23CONFIG_INET=y
24CONFIG_IP_PNP=y
25CONFIG_IP_PNP_DHCP=y
26CONFIG_IP_PNP_BOOTP=y
27CONFIG_IP_PNP_RARP=y
28CONFIG_NET_IPIP=y
29# CONFIG_INET_LRO is not set
30CONFIG_IPV6=y
31CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
32# CONFIG_PREVENT_FIRMWARE_BUILD is not set
33CONFIG_MTD=y
34CONFIG_MTD_PARTITIONS=y
35CONFIG_MTD_CHAR=y
36CONFIG_MTD_BLOCK=y
37CONFIG_NFTL=y
38CONFIG_NFTL_RW=y
39CONFIG_MTD_BLOCK2MTD=y
40CONFIG_MTD_NAND=y
41CONFIG_MTD_NAND_ECC_SMC=y
42CONFIG_MTD_NAND_VERIFY_WRITE=y
43CONFIG_MTD_NAND_ATMEL=y
44CONFIG_MTD_NAND_PLATFORM=y
45CONFIG_BLK_DEV_LOOP=y
46CONFIG_BLK_DEV_NBD=y
47CONFIG_ATMEL_PWM=y
48CONFIG_ATMEL_TCLIB=y
49CONFIG_SCSI=y
50CONFIG_CHR_DEV_SG=y
51CONFIG_NETDEVICES=y
52CONFIG_SMSC_PHY=y
53CONFIG_NET_ETHERNET=y
54CONFIG_MACB=y
55# CONFIG_NETDEV_1000 is not set
56# CONFIG_NETDEV_10000 is not set
57# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
58CONFIG_INPUT_EVDEV=y
59CONFIG_INPUT_TOUCHSCREEN=y
60CONFIG_TOUCHSCREEN_ADS7846=y
61CONFIG_VT_HW_CONSOLE_BINDING=y
62# CONFIG_DEVKMEM is not set
63CONFIG_SERIAL_NONSTANDARD=y
64CONFIG_SERIAL_ATMEL=y
65CONFIG_SERIAL_ATMEL_CONSOLE=y
66# CONFIG_SERIAL_ATMEL_PDC is not set
67# CONFIG_HW_RANDOM is not set
68CONFIG_I2C=y
69CONFIG_I2C_CHARDEV=y
70CONFIG_SPI=y
71CONFIG_SPI_ATMEL=y
72# CONFIG_HWMON is not set
73CONFIG_VIDEO_OUTPUT_CONTROL=y
74CONFIG_FB=y
75CONFIG_FB_ATMEL=y
76CONFIG_BACKLIGHT_LCD_SUPPORT=y
77CONFIG_LCD_CLASS_DEVICE=y
78CONFIG_BACKLIGHT_CLASS_DEVICE=y
79CONFIG_BACKLIGHT_ATMEL_LCDC=y
80# CONFIG_VGA_CONSOLE is not set
81CONFIG_FRAMEBUFFER_CONSOLE=y
82CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
83CONFIG_LOGO=y
84CONFIG_USB=y
85CONFIG_USB_DEVICEFS=y
86CONFIG_USB_MON=y
87CONFIG_USB_OHCI_HCD=y
88CONFIG_USB_STORAGE=y
89CONFIG_MMC=y
90CONFIG_SDIO_UART=y
91CONFIG_MMC_AT91=m
92CONFIG_EXT2_FS=y
93# CONFIG_DNOTIFY is not set
94CONFIG_AUTOFS_FS=y
95CONFIG_VFAT_FS=y
96CONFIG_TMPFS=y
97CONFIG_JFFS2_FS=y
98CONFIG_JFFS2_FS_WBUF_VERIFY=y
99CONFIG_NFS_FS=y
100CONFIG_ROOT_NFS=y
101# CONFIG_ENABLE_WARN_DEPRECATED is not set
102# CONFIG_ENABLE_MUST_CHECK is not set
103CONFIG_SYSCTL_SYSCALL_CHECK=y
104# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/s5p6442_defconfig b/arch/arm/configs/s5p6442_defconfig
deleted file mode 100644
index 0e92a784af66..000000000000
--- a/arch/arm/configs/s5p6442_defconfig
+++ /dev/null
@@ -1,65 +0,0 @@
1CONFIG_EXPERIMENTAL=y
2CONFIG_SYSFS_DEPRECATED_V2=y
3CONFIG_BLK_DEV_INITRD=y
4CONFIG_KALLSYMS_ALL=y
5CONFIG_MODULES=y
6CONFIG_MODULE_UNLOAD=y
7# CONFIG_BLK_DEV_BSG is not set
8CONFIG_ARCH_S5P6442=y
9CONFIG_S3C_LOWLEVEL_UART_PORT=1
10CONFIG_MACH_SMDK6442=y
11CONFIG_CPU_32v6K=y
12CONFIG_AEABI=y
13CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
14CONFIG_FPE_NWFPE=y
15CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
16# CONFIG_PREVENT_FIRMWARE_BUILD is not set
17CONFIG_BLK_DEV_LOOP=y
18CONFIG_BLK_DEV_RAM=y
19CONFIG_BLK_DEV_RAM_SIZE=8192
20# CONFIG_MISC_DEVICES is not set
21CONFIG_SCSI=y
22CONFIG_BLK_DEV_SD=y
23CONFIG_CHR_DEV_SG=y
24CONFIG_INPUT_EVDEV=y
25# CONFIG_INPUT_KEYBOARD is not set
26# CONFIG_INPUT_MOUSE is not set
27CONFIG_INPUT_TOUCHSCREEN=y
28CONFIG_SERIAL_8250=y
29CONFIG_SERIAL_8250_NR_UARTS=3
30CONFIG_SERIAL_SAMSUNG=y
31CONFIG_SERIAL_SAMSUNG_CONSOLE=y
32CONFIG_HW_RANDOM=y
33# CONFIG_HWMON is not set
34# CONFIG_VGA_CONSOLE is not set
35# CONFIG_HID_SUPPORT is not set
36# CONFIG_USB_SUPPORT is not set
37CONFIG_EXT2_FS=y
38CONFIG_INOTIFY=y
39CONFIG_MSDOS_FS=y
40CONFIG_VFAT_FS=y
41CONFIG_TMPFS=y
42CONFIG_TMPFS_POSIX_ACL=y
43CONFIG_CRAMFS=y
44CONFIG_ROMFS_FS=y
45CONFIG_PARTITION_ADVANCED=y
46CONFIG_BSD_DISKLABEL=y
47CONFIG_SOLARIS_X86_PARTITION=y
48CONFIG_NLS_CODEPAGE_437=y
49CONFIG_NLS_ASCII=y
50CONFIG_NLS_ISO8859_1=y
51CONFIG_MAGIC_SYSRQ=y
52CONFIG_DEBUG_KERNEL=y
53CONFIG_DEBUG_RT_MUTEXES=y
54CONFIG_DEBUG_SPINLOCK=y
55CONFIG_DEBUG_MUTEXES=y
56CONFIG_DEBUG_SPINLOCK_SLEEP=y
57CONFIG_DEBUG_INFO=y
58# CONFIG_RCU_CPU_STALL_DETECTOR is not set
59CONFIG_SYSCTL_SYSCALL_CHECK=y
60# CONFIG_ARM_UNWIND is not set
61CONFIG_DEBUG_USER=y
62CONFIG_DEBUG_ERRORS=y
63CONFIG_DEBUG_LL=y
64CONFIG_DEBUG_S3C_UART=1
65CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/usb-a9263_defconfig b/arch/arm/configs/usb-a9263_defconfig
deleted file mode 100644
index ee82d09249c6..000000000000
--- a/arch/arm/configs/usb-a9263_defconfig
+++ /dev/null
@@ -1,106 +0,0 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set
3# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y
5CONFIG_LOG_BUF_SHIFT=14
6# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
7CONFIG_SLAB=y
8CONFIG_MODULES=y
9CONFIG_MODULE_UNLOAD=y
10# CONFIG_BLK_DEV_BSG is not set
11# CONFIG_IOSCHED_DEADLINE is not set
12# CONFIG_IOSCHED_CFQ is not set
13CONFIG_ARCH_AT91=y
14CONFIG_ARCH_AT91SAM9263=y
15CONFIG_MACH_USB_A9263=y
16CONFIG_AT91_SLOW_CLOCK=y
17# CONFIG_ARM_THUMB is not set
18CONFIG_AEABI=y
19CONFIG_ZBOOT_ROM_TEXT=0x0
20CONFIG_ZBOOT_ROM_BSS=0x0
21CONFIG_CMDLINE="mem=64M console=ttyS0,115200"
22CONFIG_FPE_NWFPE=y
23CONFIG_PM=y
24CONFIG_NET=y
25CONFIG_PACKET=y
26CONFIG_UNIX=y
27CONFIG_INET=y
28CONFIG_IP_MULTICAST=y
29CONFIG_IP_ADVANCED_ROUTER=y
30CONFIG_IP_ROUTE_VERBOSE=y
31CONFIG_IP_PNP=y
32CONFIG_IP_PNP_BOOTP=y
33CONFIG_IP_PNP_RARP=y
34CONFIG_IP_MROUTE=y
35CONFIG_IP_PIMSM_V1=y
36CONFIG_IP_PIMSM_V2=y
37# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
38# CONFIG_INET_XFRM_MODE_TUNNEL is not set
39# CONFIG_INET_XFRM_MODE_BEET is not set
40# CONFIG_INET_LRO is not set
41# CONFIG_INET_DIAG is not set
42# CONFIG_IPV6 is not set
43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
44CONFIG_MTD=y
45CONFIG_MTD_PARTITIONS=y
46CONFIG_MTD_CMDLINE_PARTS=y
47CONFIG_MTD_CHAR=y
48CONFIG_MTD_BLOCK=y
49CONFIG_MTD_DATAFLASH=y
50CONFIG_MTD_NAND=y
51CONFIG_MTD_NAND_ATMEL=y
52CONFIG_MTD_NAND_ATMEL_ECC_SOFT=y
53CONFIG_BLK_DEV_LOOP=y
54# CONFIG_MISC_DEVICES is not set
55CONFIG_SCSI=y
56CONFIG_BLK_DEV_SD=y
57CONFIG_SCSI_MULTI_LUN=y
58CONFIG_NETDEVICES=y
59CONFIG_NET_ETHERNET=y
60CONFIG_MII=y
61CONFIG_MACB=y
62# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
63CONFIG_INPUT_EVDEV=y
64CONFIG_INPUT_EVBUG=y
65# CONFIG_KEYBOARD_ATKBD is not set
66CONFIG_KEYBOARD_GPIO=y
67# CONFIG_INPUT_MOUSE is not set
68# CONFIG_SERIO is not set
69CONFIG_SERIAL_ATMEL=y
70CONFIG_SERIAL_ATMEL_CONSOLE=y
71CONFIG_HW_RANDOM=y
72CONFIG_SPI=y
73CONFIG_SPI_ATMEL=y
74# CONFIG_HWMON is not set
75# CONFIG_VGA_CONSOLE is not set
76# CONFIG_USB_HID is not set
77CONFIG_USB=y
78CONFIG_USB_DEVICEFS=y
79CONFIG_USB_MON=y
80CONFIG_USB_OHCI_HCD=y
81CONFIG_USB_STORAGE=y
82CONFIG_USB_GADGET=y
83CONFIG_USB_ETH=m
84CONFIG_NEW_LEDS=y
85CONFIG_LEDS_CLASS=y
86CONFIG_LEDS_GPIO=y
87CONFIG_LEDS_TRIGGERS=y
88CONFIG_LEDS_TRIGGER_HEARTBEAT=y
89CONFIG_EXT2_FS=y
90CONFIG_INOTIFY=y
91CONFIG_FUSE_FS=m
92CONFIG_VFAT_FS=y
93CONFIG_TMPFS=y
94CONFIG_JFFS2_FS=y
95CONFIG_NFS_FS=y
96CONFIG_NFS_V3=y
97CONFIG_NFS_V3_ACL=y
98CONFIG_NFS_V4=y
99CONFIG_ROOT_NFS=y
100CONFIG_NLS_CODEPAGE_437=y
101CONFIG_NLS_CODEPAGE_850=y
102CONFIG_NLS_ISO8859_1=y
103CONFIG_DEBUG_KERNEL=y
104CONFIG_DEBUG_USER=y
105CONFIG_DEBUG_LL=y
106# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 6b7403fd8f54..b4892a06442c 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -203,8 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
203#define find_first_bit(p,sz) _find_first_bit_le(p,sz) 203#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
204#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) 204#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off)
205 205
206#define WORD_BITOFF_TO_LE(x) ((x))
207
208#else 206#else
209/* 207/*
210 * These are the big endian, atomic definitions. 208 * These are the big endian, atomic definitions.
@@ -214,8 +212,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
214#define find_first_bit(p,sz) _find_first_bit_be(p,sz) 212#define find_first_bit(p,sz) _find_first_bit_be(p,sz)
215#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) 213#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off)
216 214
217#define WORD_BITOFF_TO_LE(x) ((x) ^ 0x18)
218
219#endif 215#endif
220 216
221#if __LINUX_ARM_ARCH__ < 5 217#if __LINUX_ARM_ARCH__ < 5
@@ -287,55 +283,29 @@ static inline int fls(int x)
287#include <asm-generic/bitops/hweight.h> 283#include <asm-generic/bitops/hweight.h>
288#include <asm-generic/bitops/lock.h> 284#include <asm-generic/bitops/lock.h>
289 285
290static inline void __set_bit_le(int nr, void *addr) 286#ifdef __ARMEB__
291{
292 __set_bit(WORD_BITOFF_TO_LE(nr), addr);
293}
294
295static inline void __clear_bit_le(int nr, void *addr)
296{
297 __clear_bit(WORD_BITOFF_TO_LE(nr), addr);
298}
299
300static inline int __test_and_set_bit_le(int nr, void *addr)
301{
302 return __test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr);
303}
304
305static inline int test_and_set_bit_le(int nr, void *addr)
306{
307 return test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr);
308}
309
310static inline int __test_and_clear_bit_le(int nr, void *addr)
311{
312 return __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr);
313}
314
315static inline int test_and_clear_bit_le(int nr, void *addr)
316{
317 return test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr);
318}
319
320static inline int test_bit_le(int nr, const void *addr)
321{
322 return test_bit(WORD_BITOFF_TO_LE(nr), addr);
323}
324 287
325static inline int find_first_zero_bit_le(const void *p, unsigned size) 288static inline int find_first_zero_bit_le(const void *p, unsigned size)
326{ 289{
327 return _find_first_zero_bit_le(p, size); 290 return _find_first_zero_bit_le(p, size);
328} 291}
292#define find_first_zero_bit_le find_first_zero_bit_le
329 293
330static inline int find_next_zero_bit_le(const void *p, int size, int offset) 294static inline int find_next_zero_bit_le(const void *p, int size, int offset)
331{ 295{
332 return _find_next_zero_bit_le(p, size, offset); 296 return _find_next_zero_bit_le(p, size, offset);
333} 297}
298#define find_next_zero_bit_le find_next_zero_bit_le
334 299
335static inline int find_next_bit_le(const void *p, int size, int offset) 300static inline int find_next_bit_le(const void *p, int size, int offset)
336{ 301{
337 return _find_next_bit_le(p, size, offset); 302 return _find_next_bit_le(p, size, offset);
338} 303}
304#define find_next_bit_le find_next_bit_le
305
306#endif
307
308#include <asm-generic/bitops/le.h>
339 309
340/* 310/*
341 * Ext2 is defined to use little-endian byte ordering. 311 * Ext2 is defined to use little-endian byte ordering.
diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h
index 2242ce22ec6c..d493d0b742a1 100644
--- a/arch/arm/include/asm/fiq.h
+++ b/arch/arm/include/asm/fiq.h
@@ -4,6 +4,13 @@
4 * Support for FIQ on ARM architectures. 4 * Support for FIQ on ARM architectures.
5 * Written by Philip Blundell <philb@gnu.org>, 1998 5 * Written by Philip Blundell <philb@gnu.org>, 1998
6 * Re-written by Russell King 6 * Re-written by Russell King
7 *
8 * NOTE: The FIQ mode registers are not magically preserved across
9 * suspend/resume.
10 *
11 * Drivers which require these registers to be preserved across power
12 * management operations must implement appropriate suspend/resume handlers to
13 * save and restore them.
7 */ 14 */
8 15
9#ifndef __ASM_FIQ_H 16#ifndef __ASM_FIQ_H
@@ -29,9 +36,21 @@ struct fiq_handler {
29extern int claim_fiq(struct fiq_handler *f); 36extern int claim_fiq(struct fiq_handler *f);
30extern void release_fiq(struct fiq_handler *f); 37extern void release_fiq(struct fiq_handler *f);
31extern void set_fiq_handler(void *start, unsigned int length); 38extern void set_fiq_handler(void *start, unsigned int length);
32extern void set_fiq_regs(struct pt_regs *regs);
33extern void get_fiq_regs(struct pt_regs *regs);
34extern void enable_fiq(int fiq); 39extern void enable_fiq(int fiq);
35extern void disable_fiq(int fiq); 40extern void disable_fiq(int fiq);
36 41
42/* helpers defined in fiqasm.S: */
43extern void __set_fiq_regs(unsigned long const *regs);
44extern void __get_fiq_regs(unsigned long *regs);
45
46static inline void set_fiq_regs(struct pt_regs const *regs)
47{
48 __set_fiq_regs(&regs->ARM_r8);
49}
50
51static inline void get_fiq_regs(struct pt_regs *regs)
52{
53 __get_fiq_regs(&regs->ARM_r8);
54}
55
37#endif 56#endif
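
The note added to fiq.h above leaves register save/restore to each FIQ user. A minimal sketch of what such a driver's power-management hooks might look like, assuming a hypothetical platform driver: only claim_fiq()/get_fiq_regs()/set_fiq_regs() come from the header shown here, every "my_fiq" name is a placeholder.

	/* Sketch only: the "my_fiq" driver and its registration are hypothetical. */
	#include <linux/platform_device.h>
	#include <asm/fiq.h>
	#include <asm/ptrace.h>

	static struct pt_regs my_fiq_regs;	/* banked r8-r14 of the FIQ handler */

	static int my_fiq_suspend(struct platform_device *pdev, pm_message_t state)
	{
		get_fiq_regs(&my_fiq_regs);	/* FIQ registers are lost across suspend */
		return 0;
	}

	static int my_fiq_resume(struct platform_device *pdev)
	{
		set_fiq_regs(&my_fiq_regs);	/* reinstall the handler's working registers */
		return 0;
	}

	/* registered elsewhere with platform_driver_register() */
	static struct platform_driver my_fiq_driver = {
		.driver		= { .name = "my-fiq-user" },
		.suspend	= my_fiq_suspend,
		.resume		= my_fiq_resume,
	};
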
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index bf13b814c1b8..946f4d778f71 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -18,6 +18,8 @@ struct machine_desc {
18 unsigned int nr; /* architecture number */ 18 unsigned int nr; /* architecture number */
19 const char *name; /* architecture name */ 19 const char *name; /* architecture name */
20 unsigned long boot_params; /* tagged list */ 20 unsigned long boot_params; /* tagged list */
21 const char **dt_compat; /* array of device tree
22 * 'compatible' strings */
21 23
22 unsigned int nr_irqs; /* number of IRQs */ 24 unsigned int nr_irqs; /* number of IRQs */
23 25
@@ -48,6 +50,13 @@ struct machine_desc {
48extern struct machine_desc *machine_desc; 50extern struct machine_desc *machine_desc;
49 51
50/* 52/*
53 * Machine type table - also only accessible during boot
54 */
55extern struct machine_desc __arch_info_begin[], __arch_info_end[];
56#define for_each_machine_desc(p) \
57 for (p = __arch_info_begin; p < __arch_info_end; p++)
58
59/*
51 * Set of macros to define architecture features. This is built into 60 * Set of macros to define architecture features. This is built into
52 * a table by the linker. 61 * a table by the linker.
53 */ 62 */
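
The new dt_compat field is what setup_machine_fdt() (added in devtree.c further down) matches against the device tree root's "compatible" property. A board opting in just lists its compatible strings in its machine record; a minimal sketch follows, in which the "acme" board name, compatible string and boot_params value are placeholders and only .dt_compat is the new field.

	/* Sketch only: board name, compatible string and addresses are placeholders. */
	#include <asm/mach/arch.h>
	#include <asm/mach-types.h>

	static const char *acme_evb_dt_compat[] = {
		"acme,evb",
		NULL
	};

	MACHINE_START(ACME_EVB, "Acme EVB")
		/* .map_io, .init_irq, .timer, .init_machine as on any other board */
		.boot_params	= 0x80000100,
		.dt_compat	= acme_evb_dt_compat,
	MACHINE_END
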
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index f51a69595f6e..ac75d0848889 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -197,7 +197,7 @@ typedef unsigned long pgprot_t;
197 197
198typedef struct page *pgtable_t; 198typedef struct page *pgtable_t;
199 199
200#ifndef CONFIG_SPARSEMEM 200#ifdef CONFIG_HAVE_ARCH_PFN_VALID
201extern int pfn_valid(unsigned long); 201extern int pfn_valid(unsigned long);
202#endif 202#endif
203 203
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
new file mode 100644
index 000000000000..11b8708fc4db
--- /dev/null
+++ b/arch/arm/include/asm/prom.h
@@ -0,0 +1,37 @@
1/*
2 * arch/arm/include/asm/prom.h
3 *
4 * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#ifndef __ASMARM_PROM_H
12#define __ASMARM_PROM_H
13
14#ifdef CONFIG_OF
15
16#include <asm/setup.h>
17#include <asm/irq.h>
18
19static inline void irq_dispose_mapping(unsigned int virq)
20{
21 return;
22}
23
24extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
25extern void arm_dt_memblock_reserve(void);
26
27#else /* CONFIG_OF */
28
29static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
30{
31 return NULL;
32}
33
34static inline void arm_dt_memblock_reserve(void) { }
35
36#endif /* CONFIG_OF */
37#endif /* ASMARM_PROM_H */
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 95176af3df8c..ee2ad8ae07af 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -217,6 +217,10 @@ extern struct meminfo meminfo;
217#define bank_phys_end(bank) ((bank)->start + (bank)->size) 217#define bank_phys_end(bank) ((bank)->start + (bank)->size)
218#define bank_phys_size(bank) (bank)->size 218#define bank_phys_size(bank) (bank)->size
219 219
220extern int arm_add_memory(phys_addr_t start, unsigned long size);
221extern void early_print(const char *str, ...);
222extern void dump_machine_table(void);
223
220#endif /* __KERNEL__ */ 224#endif /* __KERNEL__ */
221 225
222#endif 226#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index d2b514fd76f4..e42d96a45d3e 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -70,6 +70,7 @@ extern void platform_smp_prepare_cpus(unsigned int);
70 */ 70 */
71struct secondary_data { 71struct secondary_data {
72 unsigned long pgdir; 72 unsigned long pgdir;
73 unsigned long swapper_pg_dir;
73 void *stack; 74 void *stack;
74}; 75};
75extern struct secondary_data secondary_data; 76extern struct secondary_data secondary_data;
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 87dbe3e21970..2c04ed5efeb5 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -400,6 +400,8 @@
400#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371) 400#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371)
401#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372) 401#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372)
402#define __NR_syncfs (__NR_SYSCALL_BASE+373) 402#define __NR_syncfs (__NR_SYSCALL_BASE+373)
403#define __NR_sendmmsg (__NR_SYSCALL_BASE+374)
404#define __NR_setns (__NR_SYSCALL_BASE+375)
403 405
404/* 406/*
405 * The following SWIs are ARM private. 407 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8d95446150a3..a5b31af5c2b8 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_OC_ETM) += etm.o
24 24
25obj-$(CONFIG_ISA_DMA_API) += dma.o 25obj-$(CONFIG_ISA_DMA_API) += dma.o
26obj-$(CONFIG_ARCH_ACORN) += ecard.o 26obj-$(CONFIG_ARCH_ACORN) += ecard.o
27obj-$(CONFIG_FIQ) += fiq.o 27obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
28obj-$(CONFIG_MODULES) += armksyms.o module.o 28obj-$(CONFIG_MODULES) += armksyms.o module.o
29obj-$(CONFIG_ARTHUR) += arthur.o 29obj-$(CONFIG_ARTHUR) += arthur.o
30obj-$(CONFIG_ISA_DMA) += dma-isa.o 30obj-$(CONFIG_ISA_DMA) += dma-isa.o
@@ -44,6 +44,7 @@ obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
44obj-$(CONFIG_KGDB) += kgdb.o 44obj-$(CONFIG_KGDB) += kgdb.o
45obj-$(CONFIG_ARM_UNWIND) += unwind.o 45obj-$(CONFIG_ARM_UNWIND) += unwind.o
46obj-$(CONFIG_HAVE_TCM) += tcm.o 46obj-$(CONFIG_HAVE_TCM) += tcm.o
47obj-$(CONFIG_OF) += devtree.o
47obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 48obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
48obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o 49obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
49CFLAGS_swp_emulate.o := -Wa,-march=armv7-a 50CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 7fbf28c35bb2..80f7896cc016 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -383,6 +383,8 @@
383 CALL(sys_open_by_handle_at) 383 CALL(sys_open_by_handle_at)
384 CALL(sys_clock_adjtime) 384 CALL(sys_clock_adjtime)
385 CALL(sys_syncfs) 385 CALL(sys_syncfs)
386 CALL(sys_sendmmsg)
387/* 375 */ CALL(sys_setns)
386#ifndef syscalls_counted 388#ifndef syscalls_counted
387.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 389.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
388#define syscalls_counted 390#define syscalls_counted
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
new file mode 100644
index 000000000000..a701e4226a6c
--- /dev/null
+++ b/arch/arm/kernel/devtree.c
@@ -0,0 +1,145 @@
1/*
2 * linux/arch/arm/kernel/devtree.c
3 *
4 * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/errno.h>
14#include <linux/types.h>
15#include <linux/bootmem.h>
16#include <linux/memblock.h>
17#include <linux/of.h>
18#include <linux/of_fdt.h>
19#include <linux/of_irq.h>
20#include <linux/of_platform.h>
21
22#include <asm/setup.h>
23#include <asm/page.h>
24#include <asm/mach/arch.h>
25#include <asm/mach-types.h>
26
27void __init early_init_dt_add_memory_arch(u64 base, u64 size)
28{
29 arm_add_memory(base, size);
30}
31
32void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
33{
34 return alloc_bootmem_align(size, align);
35}
36
37void __init arm_dt_memblock_reserve(void)
38{
39 u64 *reserve_map, base, size;
40
41 if (!initial_boot_params)
42 return;
43
44 /* Reserve the dtb region */
45 memblock_reserve(virt_to_phys(initial_boot_params),
46 be32_to_cpu(initial_boot_params->totalsize));
47
48 /*
49 * Process the reserve map. This will probably overlap the initrd
50 * and dtb locations which are already reserved, but overlapping
51 * doesn't hurt anything
52 */
53 reserve_map = ((void*)initial_boot_params) +
54 be32_to_cpu(initial_boot_params->off_mem_rsvmap);
55 while (1) {
56 base = be64_to_cpup(reserve_map++);
57 size = be64_to_cpup(reserve_map++);
58 if (!size)
59 break;
60 memblock_reserve(base, size);
61 }
62}
63
64/**
65 * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
66 * @dt_phys: physical address of dt blob
67 *
68 * If a dtb was passed to the kernel in r2, then use it to choose the
69 * correct machine_desc and to setup the system.
70 */
71struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
72{
73 struct boot_param_header *devtree;
74 struct machine_desc *mdesc, *mdesc_best = NULL;
75 unsigned int score, mdesc_score = ~1;
76 unsigned long dt_root;
77 const char *model;
78
79 devtree = phys_to_virt(dt_phys);
80
81 /* check device tree validity */
82 if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
83 return NULL;
84
85 /* Search the mdescs for the 'best' compatible value match */
86 initial_boot_params = devtree;
87 dt_root = of_get_flat_dt_root();
88 for_each_machine_desc(mdesc) {
89 score = of_flat_dt_match(dt_root, mdesc->dt_compat);
90 if (score > 0 && score < mdesc_score) {
91 mdesc_best = mdesc;
92 mdesc_score = score;
93 }
94 }
95 if (!mdesc_best) {
96 const char *prop;
97 long size;
98
99 early_print("\nError: unrecognized/unsupported "
100 "device tree compatible list:\n[ ");
101
102 prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
103 while (size > 0) {
104 early_print("'%s' ", prop);
105 size -= strlen(prop) + 1;
106 prop += strlen(prop) + 1;
107 }
108 early_print("]\n\n");
109
110 dump_machine_table(); /* does not return */
111 }
112
113 model = of_get_flat_dt_prop(dt_root, "model", NULL);
114 if (!model)
115 model = of_get_flat_dt_prop(dt_root, "compatible", NULL);
116 if (!model)
117 model = "<unknown>";
118 pr_info("Machine: %s, model: %s\n", mdesc_best->name, model);
119
120 /* Retrieve various information from the /chosen node */
121 of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
122 /* Initialize {size,address}-cells info */
123 of_scan_flat_dt(early_init_dt_scan_root, NULL);
124 /* Setup memory, calling early_init_dt_add_memory_arch */
125 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
126
127 /* Change machine number to match the mdesc we're using */
128 __machine_arch_type = mdesc_best->nr;
129
130 return mdesc_best;
131}
132
133/**
134 * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq#
135 *
136 * Currently the mapping mechanism is trivial; simple flat hwirq numbers are
137 * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not
138 * supported.
139 */
140unsigned int irq_create_of_mapping(struct device_node *controller,
141 const u32 *intspec, unsigned int intsize)
142{
143 return intspec[0];
144}
145EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index e72dc34eea1c..4c164ece5891 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -89,47 +89,6 @@ void set_fiq_handler(void *start, unsigned int length)
89 flush_icache_range(0x1c, 0x1c + length); 89 flush_icache_range(0x1c, 0x1c + length);
90} 90}
91 91
92/*
93 * Taking an interrupt in FIQ mode is death, so both these functions
94 * disable irqs for the duration. Note - these functions are almost
95 * entirely coded in assembly.
96 */
97void __naked set_fiq_regs(struct pt_regs *regs)
98{
99 register unsigned long tmp;
100 asm volatile (
101 "mov ip, sp\n\
102 stmfd sp!, {fp, ip, lr, pc}\n\
103 sub fp, ip, #4\n\
104 mrs %0, cpsr\n\
105 msr cpsr_c, %2 @ select FIQ mode\n\
106 mov r0, r0\n\
107 ldmia %1, {r8 - r14}\n\
108 msr cpsr_c, %0 @ return to SVC mode\n\
109 mov r0, r0\n\
110 ldmfd sp, {fp, sp, pc}"
111 : "=&r" (tmp)
112 : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
113}
114
115void __naked get_fiq_regs(struct pt_regs *regs)
116{
117 register unsigned long tmp;
118 asm volatile (
119 "mov ip, sp\n\
120 stmfd sp!, {fp, ip, lr, pc}\n\
121 sub fp, ip, #4\n\
122 mrs %0, cpsr\n\
123 msr cpsr_c, %2 @ select FIQ mode\n\
124 mov r0, r0\n\
125 stmia %1, {r8 - r14}\n\
126 msr cpsr_c, %0 @ return to SVC mode\n\
127 mov r0, r0\n\
128 ldmfd sp, {fp, sp, pc}"
129 : "=&r" (tmp)
130 : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
131}
132
133int claim_fiq(struct fiq_handler *f) 92int claim_fiq(struct fiq_handler *f)
134{ 93{
135 int ret = 0; 94 int ret = 0;
@@ -174,8 +133,8 @@ void disable_fiq(int fiq)
174} 133}
175 134
176EXPORT_SYMBOL(set_fiq_handler); 135EXPORT_SYMBOL(set_fiq_handler);
177EXPORT_SYMBOL(set_fiq_regs); 136EXPORT_SYMBOL(__set_fiq_regs); /* defined in fiqasm.S */
178EXPORT_SYMBOL(get_fiq_regs); 137EXPORT_SYMBOL(__get_fiq_regs); /* defined in fiqasm.S */
179EXPORT_SYMBOL(claim_fiq); 138EXPORT_SYMBOL(claim_fiq);
180EXPORT_SYMBOL(release_fiq); 139EXPORT_SYMBOL(release_fiq);
181EXPORT_SYMBOL(enable_fiq); 140EXPORT_SYMBOL(enable_fiq);
diff --git a/arch/arm/kernel/fiqasm.S b/arch/arm/kernel/fiqasm.S
new file mode 100644
index 000000000000..207f9d652010
--- /dev/null
+++ b/arch/arm/kernel/fiqasm.S
@@ -0,0 +1,49 @@
1/*
2 * linux/arch/arm/kernel/fiqasm.S
3 *
4 * Derived from code originally in linux/arch/arm/kernel/fiq.c:
5 *
6 * Copyright (C) 1998 Russell King
7 * Copyright (C) 1998, 1999 Phil Blundell
8 * Copyright (C) 2011, Linaro Limited
9 *
10 * FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
11 *
12 * FIQ support re-written by Russell King to be more generic
13 *
14 * v7/Thumb-2 compatibility modifications by Linaro Limited, 2011.
15 */
16
17#include <linux/linkage.h>
18#include <asm/assembler.h>
19
20/*
21 * Taking an interrupt in FIQ mode is death, so both these functions
22 * disable irqs for the duration.
23 */
24
25ENTRY(__set_fiq_regs)
26 mov r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
27 mrs r1, cpsr
28 msr cpsr_c, r2 @ select FIQ mode
29 mov r0, r0 @ avoid hazard prior to ARMv4
30 ldmia r0!, {r8 - r12}
31 ldr sp, [r0], #4
32 ldr lr, [r0]
33 msr cpsr_c, r1 @ return to SVC mode
34 mov r0, r0 @ avoid hazard prior to ARMv4
35 mov pc, lr
36ENDPROC(__set_fiq_regs)
37
38ENTRY(__get_fiq_regs)
39 mov r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
40 mrs r1, cpsr
41 msr cpsr_c, r2 @ select FIQ mode
42 mov r0, r0 @ avoid hazard prior to ARMv4
43 stmia r0!, {r8 - r12}
44 str sp, [r0], #4
45 str lr, [r0]
46 msr cpsr_c, r1 @ return to SVC mode
47 mov r0, r0 @ avoid hazard prior to ARMv4
48 mov pc, lr
49ENDPROC(__get_fiq_regs)
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index c84b57d27d07..854bd22380d3 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -15,6 +15,12 @@
15#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) 15#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
16#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2) 16#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2)
17 17
18#ifdef CONFIG_CPU_BIG_ENDIAN
19#define OF_DT_MAGIC 0xd00dfeed
20#else
21#define OF_DT_MAGIC 0xedfe0dd0 /* 0xd00dfeed in big-endian */
22#endif
23
18/* 24/*
19 * Exception handling. Something went wrong and we can't proceed. We 25 * Exception handling. Something went wrong and we can't proceed. We
20 * ought to tell the user, but since we don't have any guarantee that 26 * ought to tell the user, but since we don't have any guarantee that
@@ -28,20 +34,26 @@
28 34
29/* Determine validity of the r2 atags pointer. The heuristic requires 35/* Determine validity of the r2 atags pointer. The heuristic requires
30 * that the pointer be aligned, in the first 16k of physical RAM and 36 * that the pointer be aligned, in the first 16k of physical RAM and
31 * that the ATAG_CORE marker is first and present. Future revisions 37 * that the ATAG_CORE marker is first and present. If CONFIG_OF_FLATTREE
38 * is selected, then it will also accept a dtb pointer. Future revisions
32 * of this function may be more lenient with the physical address and 39 * of this function may be more lenient with the physical address and
33 * may also be able to move the ATAGS block if necessary. 40 * may also be able to move the ATAGS block if necessary.
34 * 41 *
35 * Returns: 42 * Returns:
36 * r2 either valid atags pointer, or zero 43 * r2 either valid atags pointer, valid dtb pointer, or zero
37 * r5, r6 corrupted 44 * r5, r6 corrupted
38 */ 45 */
39__vet_atags: 46__vet_atags:
40 tst r2, #0x3 @ aligned? 47 tst r2, #0x3 @ aligned?
41 bne 1f 48 bne 1f
42 49
43 ldr r5, [r2, #0] @ is first tag ATAG_CORE? 50 ldr r5, [r2, #0]
44 cmp r5, #ATAG_CORE_SIZE 51#ifdef CONFIG_OF_FLATTREE
52 ldr r6, =OF_DT_MAGIC @ is it a DTB?
53 cmp r5, r6
54 beq 2f
55#endif
56 cmp r5, #ATAG_CORE_SIZE @ is first tag ATAG_CORE?
45 cmpne r5, #ATAG_CORE_SIZE_EMPTY 57 cmpne r5, #ATAG_CORE_SIZE_EMPTY
46 bne 1f 58 bne 1f
47 ldr r5, [r2, #4] 59 ldr r5, [r2, #4]
@@ -49,7 +61,7 @@ __vet_atags:
49 cmp r5, r6 61 cmp r5, r6
50 bne 1f 62 bne 1f
51 63
52 mov pc, lr @ atag pointer is ok 642: mov pc, lr @ atag/dtb pointer is ok
53 65
541: mov r2, #0 661: mov r2, #0
55 mov pc, lr 67 mov pc, lr
@@ -61,7 +73,7 @@ ENDPROC(__vet_atags)
61 * 73 *
62 * r0 = cp#15 control register 74 * r0 = cp#15 control register
63 * r1 = machine ID 75 * r1 = machine ID
64 * r2 = atags pointer 76 * r2 = atags/dtb pointer
65 * r9 = processor ID 77 * r9 = processor ID
66 */ 78 */
67 __INIT 79 __INIT
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c9173cfbbc74..278c1b0ebb2e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -59,7 +59,7 @@
59 * 59 *
60 * This is normally called from the decompressor code. The requirements 60 * This is normally called from the decompressor code. The requirements
61 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0, 61 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
62 * r1 = machine nr, r2 = atags pointer. 62 * r1 = machine nr, r2 = atags or dtb pointer.
63 * 63 *
64 * This code is mostly position independent, so if you link the kernel at 64 * This code is mostly position independent, so if you link the kernel at
65 * 0xc0008000, you call this at __pa(0xc0008000). 65 * 0xc0008000, you call this at __pa(0xc0008000).
@@ -91,7 +91,7 @@ ENTRY(stext)
91#endif 91#endif
92 92
93 /* 93 /*
94 * r1 = machine no, r2 = atags, 94 * r1 = machine no, r2 = atags or dtb,
95 * r8 = phys_offset, r9 = cpuid, r10 = procinfo 95 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
96 */ 96 */
97 bl __vet_atags 97 bl __vet_atags
@@ -113,6 +113,7 @@ ENTRY(stext)
113 ldr r13, =__mmap_switched @ address to jump to after 113 ldr r13, =__mmap_switched @ address to jump to after
114 @ mmu has been enabled 114 @ mmu has been enabled
115 adr lr, BSYM(1f) @ return (PIC) address 115 adr lr, BSYM(1f) @ return (PIC) address
116 mov r8, r4 @ set TTBR1 to swapper_pg_dir
116 ARM( add pc, r10, #PROCINFO_INITFUNC ) 117 ARM( add pc, r10, #PROCINFO_INITFUNC )
117 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 118 THUMB( add r12, r10, #PROCINFO_INITFUNC )
118 THUMB( mov pc, r12 ) 119 THUMB( mov pc, r12 )
@@ -302,8 +303,10 @@ ENTRY(secondary_startup)
302 */ 303 */
303 adr r4, __secondary_data 304 adr r4, __secondary_data
304 ldmia r4, {r5, r7, r12} @ address to jump to after 305 ldmia r4, {r5, r7, r12} @ address to jump to after
305 sub r4, r4, r5 @ mmu has been enabled 306 sub lr, r4, r5 @ mmu has been enabled
306 ldr r4, [r7, r4] @ get secondary_data.pgdir 307 ldr r4, [r7, lr] @ get secondary_data.pgdir
308 add r7, r7, #4
309 ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir
307 adr lr, BSYM(__enable_mmu) @ return address 310 adr lr, BSYM(__enable_mmu) @ return address
308 mov r13, r12 @ __secondary_switched address 311 mov r13, r12 @ __secondary_switched address
309 ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor 312 ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
@@ -339,7 +342,7 @@ __secondary_data:
339 * 342 *
340 * r0 = cp#15 control register 343 * r0 = cp#15 control register
341 * r1 = machine ID 344 * r1 = machine ID
342 * r2 = atags pointer 345 * r2 = atags or dtb pointer
343 * r4 = page table pointer 346 * r4 = page table pointer
344 * r9 = processor ID 347 * r9 = processor ID
345 * r13 = *virtual* address to jump to upon completion 348 * r13 = *virtual* address to jump to upon completion
@@ -376,7 +379,7 @@ ENDPROC(__enable_mmu)
376 * 379 *
377 * r0 = cp#15 control register 380 * r0 = cp#15 control register
378 * r1 = machine ID 381 * r1 = machine ID
379 * r2 = atags pointer 382 * r2 = atags or dtb pointer
380 * r9 = processor ID 383 * r9 = processor ID
381 * r13 = *virtual* address to jump to upon completion 384 * r13 = *virtual* address to jump to upon completion
382 * 385 *
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6dce209a623b..ed11fb08b05a 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -20,6 +20,7 @@
20#include <linux/screen_info.h> 20#include <linux/screen_info.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kexec.h> 22#include <linux/kexec.h>
23#include <linux/of_fdt.h>
23#include <linux/crash_dump.h> 24#include <linux/crash_dump.h>
24#include <linux/root_dev.h> 25#include <linux/root_dev.h>
25#include <linux/cpu.h> 26#include <linux/cpu.h>
@@ -42,6 +43,7 @@
42#include <asm/cachetype.h> 43#include <asm/cachetype.h>
43#include <asm/tlbflush.h> 44#include <asm/tlbflush.h>
44 45
46#include <asm/prom.h>
45#include <asm/mach/arch.h> 47#include <asm/mach/arch.h>
46#include <asm/mach/irq.h> 48#include <asm/mach/irq.h>
47#include <asm/mach/time.h> 49#include <asm/mach/time.h>
@@ -309,7 +311,7 @@ static void __init cacheid_init(void)
309 */ 311 */
310extern struct proc_info_list *lookup_processor_type(unsigned int); 312extern struct proc_info_list *lookup_processor_type(unsigned int);
311 313
312static void __init early_print(const char *str, ...) 314void __init early_print(const char *str, ...)
313{ 315{
314 extern void printascii(const char *); 316 extern void printascii(const char *);
315 char buf[256]; 317 char buf[256];
@@ -439,25 +441,12 @@ void cpu_init(void)
439 : "r14"); 441 : "r14");
440} 442}
441 443
442static struct machine_desc * __init setup_machine(unsigned int nr) 444void __init dump_machine_table(void)
443{ 445{
444 extern struct machine_desc __arch_info_begin[], __arch_info_end[];
445 struct machine_desc *p; 446 struct machine_desc *p;
446 447
447 /* 448 early_print("Available machine support:\n\nID (hex)\tNAME\n");
448 * locate machine in the list of supported machines. 449 for_each_machine_desc(p)
449 */
450 for (p = __arch_info_begin; p < __arch_info_end; p++)
451 if (nr == p->nr) {
452 printk("Machine: %s\n", p->name);
453 return p;
454 }
455
456 early_print("\n"
457 "Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
458 "Available machine support:\n\nID (hex)\tNAME\n", nr);
459
460 for (p = __arch_info_begin; p < __arch_info_end; p++)
461 early_print("%08x\t%s\n", p->nr, p->name); 450 early_print("%08x\t%s\n", p->nr, p->name);
462 451
463 early_print("\nPlease check your kernel config and/or bootloader.\n"); 452 early_print("\nPlease check your kernel config and/or bootloader.\n");
@@ -466,7 +455,7 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
466 /* can't use cpu_relax() here as it may require MMU setup */; 455 /* can't use cpu_relax() here as it may require MMU setup */;
467} 456}
468 457
469static int __init arm_add_memory(phys_addr_t start, unsigned long size) 458int __init arm_add_memory(phys_addr_t start, unsigned long size)
470{ 459{
471 struct membank *bank = &meminfo.bank[meminfo.nr_banks]; 460 struct membank *bank = &meminfo.bank[meminfo.nr_banks];
472 461
@@ -801,23 +790,29 @@ static void __init squash_mem_tags(struct tag *tag)
801 tag->hdr.tag = ATAG_NONE; 790 tag->hdr.tag = ATAG_NONE;
802} 791}
803 792
804void __init setup_arch(char **cmdline_p) 793static struct machine_desc * __init setup_machine_tags(unsigned int nr)
805{ 794{
806 struct tag *tags = (struct tag *)&init_tags; 795 struct tag *tags = (struct tag *)&init_tags;
807 struct machine_desc *mdesc; 796 struct machine_desc *mdesc = NULL, *p;
808 char *from = default_command_line; 797 char *from = default_command_line;
809 798
810 init_tags.mem.start = PHYS_OFFSET; 799 init_tags.mem.start = PHYS_OFFSET;
811 800
812 unwind_init(); 801 /*
813 802 * locate machine in the list of supported machines.
814 setup_processor(); 803 */
815 mdesc = setup_machine(machine_arch_type); 804 for_each_machine_desc(p)
816 machine_desc = mdesc; 805 if (nr == p->nr) {
817 machine_name = mdesc->name; 806 printk("Machine: %s\n", p->name);
807 mdesc = p;
808 break;
809 }
818 810
819 if (mdesc->soft_reboot) 811 if (!mdesc) {
820 reboot_setup("s"); 812 early_print("\nError: unrecognized/unsupported machine ID"
813 " (r1 = 0x%08x).\n\n", nr);
814 dump_machine_table(); /* does not return */
815 }
821 816
822 if (__atags_pointer) 817 if (__atags_pointer)
823 tags = phys_to_virt(__atags_pointer); 818 tags = phys_to_virt(__atags_pointer);
@@ -849,8 +844,17 @@ void __init setup_arch(char **cmdline_p)
849 if (tags->hdr.tag != ATAG_CORE) 844 if (tags->hdr.tag != ATAG_CORE)
850 convert_to_tag_list(tags); 845 convert_to_tag_list(tags);
851#endif 846#endif
852 if (tags->hdr.tag != ATAG_CORE) 847
848 if (tags->hdr.tag != ATAG_CORE) {
849#if defined(CONFIG_OF)
850 /*
851 * If CONFIG_OF is set, then assume this is a reasonably
852 * modern system that should pass boot parameters
853 */
854 early_print("Warning: Neither atags nor dtb found\n");
855#endif
853 tags = (struct tag *)&init_tags; 856 tags = (struct tag *)&init_tags;
857 }
854 858
855 if (mdesc->fixup) 859 if (mdesc->fixup)
856 mdesc->fixup(mdesc, tags, &from, &meminfo); 860 mdesc->fixup(mdesc, tags, &from, &meminfo);
@@ -862,14 +866,34 @@ void __init setup_arch(char **cmdline_p)
862 parse_tags(tags); 866 parse_tags(tags);
863 } 867 }
864 868
869 /* parse_early_param needs a boot_command_line */
870 strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
871
872 return mdesc;
873}
874
875
876void __init setup_arch(char **cmdline_p)
877{
878 struct machine_desc *mdesc;
879
880 unwind_init();
881
882 setup_processor();
883 mdesc = setup_machine_fdt(__atags_pointer);
884 if (!mdesc)
885 mdesc = setup_machine_tags(machine_arch_type);
886 machine_desc = mdesc;
887 machine_name = mdesc->name;
888
889 if (mdesc->soft_reboot)
890 reboot_setup("s");
891
865 init_mm.start_code = (unsigned long) _text; 892 init_mm.start_code = (unsigned long) _text;
866 init_mm.end_code = (unsigned long) _etext; 893 init_mm.end_code = (unsigned long) _etext;
867 init_mm.end_data = (unsigned long) _edata; 894 init_mm.end_data = (unsigned long) _edata;
868 init_mm.brk = (unsigned long) _end; 895 init_mm.brk = (unsigned long) _end;
869 896
870 /* parse_early_param needs a boot_command_line */
871 strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
872
873 /* populate cmd_line too for later use, preserving boot_command_line */ 897 /* populate cmd_line too for later use, preserving boot_command_line */
874 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); 898 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
875 *cmdline_p = cmd_line; 899 *cmdline_p = cmd_line;
@@ -881,6 +905,8 @@ void __init setup_arch(char **cmdline_p)
881 paging_init(mdesc); 905 paging_init(mdesc);
882 request_standard_resources(mdesc); 906 request_standard_resources(mdesc);
883 907
908 unflatten_device_tree();
909
884#ifdef CONFIG_SMP 910#ifdef CONFIG_SMP
885 if (is_smp()) 911 if (is_smp())
886 smp_init_cpus(); 912 smp_init_cpus();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d439a8f4c078..344e52b16c8c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -105,6 +105,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
105 */ 105 */
106 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; 106 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
107 secondary_data.pgdir = virt_to_phys(pgd); 107 secondary_data.pgdir = virt_to_phys(pgd);
108 secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
108 __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); 109 __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
109 outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); 110 outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
110 111
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index 6dc06487f3c3..c562f649734c 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -35,7 +35,7 @@ Boston, MA 02111-1307, USA. */
35 35
36#include <linux/linkage.h> 36#include <linux/linkage.h>
37#include <asm/assembler.h> 37#include <asm/assembler.h>
38 38#include <asm/unwind.h>
39 39
40.macro ARM_DIV_BODY dividend, divisor, result, curbit 40.macro ARM_DIV_BODY dividend, divisor, result, curbit
41 41
@@ -207,6 +207,7 @@ Boston, MA 02111-1307, USA. */
207 207
208ENTRY(__udivsi3) 208ENTRY(__udivsi3)
209ENTRY(__aeabi_uidiv) 209ENTRY(__aeabi_uidiv)
210UNWIND(.fnstart)
210 211
211 subs r2, r1, #1 212 subs r2, r1, #1
212 moveq pc, lr 213 moveq pc, lr
@@ -230,10 +231,12 @@ ENTRY(__aeabi_uidiv)
230 mov r0, r0, lsr r2 231 mov r0, r0, lsr r2
231 mov pc, lr 232 mov pc, lr
232 233
234UNWIND(.fnend)
233ENDPROC(__udivsi3) 235ENDPROC(__udivsi3)
234ENDPROC(__aeabi_uidiv) 236ENDPROC(__aeabi_uidiv)
235 237
236ENTRY(__umodsi3) 238ENTRY(__umodsi3)
239UNWIND(.fnstart)
237 240
238 subs r2, r1, #1 @ compare divisor with 1 241 subs r2, r1, #1 @ compare divisor with 1
239 bcc Ldiv0 242 bcc Ldiv0
@@ -247,10 +250,12 @@ ENTRY(__umodsi3)
247 250
248 mov pc, lr 251 mov pc, lr
249 252
253UNWIND(.fnend)
250ENDPROC(__umodsi3) 254ENDPROC(__umodsi3)
251 255
252ENTRY(__divsi3) 256ENTRY(__divsi3)
253ENTRY(__aeabi_idiv) 257ENTRY(__aeabi_idiv)
258UNWIND(.fnstart)
254 259
255 cmp r1, #0 260 cmp r1, #0
256 eor ip, r0, r1 @ save the sign of the result. 261 eor ip, r0, r1 @ save the sign of the result.
@@ -287,10 +292,12 @@ ENTRY(__aeabi_idiv)
287 rsbmi r0, r0, #0 292 rsbmi r0, r0, #0
288 mov pc, lr 293 mov pc, lr
289 294
295UNWIND(.fnend)
290ENDPROC(__divsi3) 296ENDPROC(__divsi3)
291ENDPROC(__aeabi_idiv) 297ENDPROC(__aeabi_idiv)
292 298
293ENTRY(__modsi3) 299ENTRY(__modsi3)
300UNWIND(.fnstart)
294 301
295 cmp r1, #0 302 cmp r1, #0
296 beq Ldiv0 303 beq Ldiv0
@@ -310,11 +317,14 @@ ENTRY(__modsi3)
310 rsbmi r0, r0, #0 317 rsbmi r0, r0, #0
311 mov pc, lr 318 mov pc, lr
312 319
320UNWIND(.fnend)
313ENDPROC(__modsi3) 321ENDPROC(__modsi3)
314 322
315#ifdef CONFIG_AEABI 323#ifdef CONFIG_AEABI
316 324
317ENTRY(__aeabi_uidivmod) 325ENTRY(__aeabi_uidivmod)
326UNWIND(.fnstart)
327UNWIND(.save {r0, r1, ip, lr} )
318 328
319 stmfd sp!, {r0, r1, ip, lr} 329 stmfd sp!, {r0, r1, ip, lr}
320 bl __aeabi_uidiv 330 bl __aeabi_uidiv
@@ -323,10 +333,12 @@ ENTRY(__aeabi_uidivmod)
323 sub r1, r1, r3 333 sub r1, r1, r3
324 mov pc, lr 334 mov pc, lr
325 335
336UNWIND(.fnend)
326ENDPROC(__aeabi_uidivmod) 337ENDPROC(__aeabi_uidivmod)
327 338
328ENTRY(__aeabi_idivmod) 339ENTRY(__aeabi_idivmod)
329 340UNWIND(.fnstart)
341UNWIND(.save {r0, r1, ip, lr} )
330 stmfd sp!, {r0, r1, ip, lr} 342 stmfd sp!, {r0, r1, ip, lr}
331 bl __aeabi_idiv 343 bl __aeabi_idiv
332 ldmfd sp!, {r1, r2, ip, lr} 344 ldmfd sp!, {r1, r2, ip, lr}
@@ -334,15 +346,18 @@ ENTRY(__aeabi_idivmod)
334 sub r1, r1, r3 346 sub r1, r1, r3
335 mov pc, lr 347 mov pc, lr
336 348
349UNWIND(.fnend)
337ENDPROC(__aeabi_idivmod) 350ENDPROC(__aeabi_idivmod)
338 351
339#endif 352#endif
340 353
341Ldiv0: 354Ldiv0:
342 355UNWIND(.fnstart)
356UNWIND(.pad #4)
357UNWIND(.save {lr})
343 str lr, [sp, #-8]! 358 str lr, [sp, #-8]!
344 bl __div0 359 bl __div0
345 mov r0, #0 @ About as wrong as it could be. 360 mov r0, #0 @ About as wrong as it could be.
346 ldr pc, [sp], #8 361 ldr pc, [sp], #8
347 362UNWIND(.fnend)
348 363ENDPROC(Ldiv0)
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 2d299bf5d72f..22484670e7ba 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -3,9 +3,6 @@ if ARCH_AT91
3config HAVE_AT91_DATAFLASH_CARD 3config HAVE_AT91_DATAFLASH_CARD
4 bool 4 bool
5 5
6config HAVE_NAND_ATMEL_BUSWIDTH_16
7 bool
8
9config HAVE_AT91_USART3 6config HAVE_AT91_USART3
10 bool 7 bool
11 8
@@ -85,11 +82,6 @@ config ARCH_AT91CAP9
85 select HAVE_FB_ATMEL 82 select HAVE_FB_ATMEL
86 select HAVE_NET_MACB 83 select HAVE_NET_MACB
87 84
88config ARCH_AT572D940HF
89 bool "AT572D940HF"
90 select CPU_ARM926T
91 select GENERIC_CLOCKEVENTS
92
93config ARCH_AT91X40 85config ARCH_AT91X40
94 bool "AT91x40" 86 bool "AT91x40"
95 select ARCH_USES_GETTIMEOFFSET 87 select ARCH_USES_GETTIMEOFFSET
@@ -209,7 +201,6 @@ comment "AT91SAM9260 / AT91SAM9XE Board Type"
209config MACH_AT91SAM9260EK 201config MACH_AT91SAM9260EK
210 bool "Atmel AT91SAM9260-EK / AT91SAM9XE Evaluation Kit" 202 bool "Atmel AT91SAM9260-EK / AT91SAM9XE Evaluation Kit"
211 select HAVE_AT91_DATAFLASH_CARD 203 select HAVE_AT91_DATAFLASH_CARD
212 select HAVE_NAND_ATMEL_BUSWIDTH_16
213 help 204 help
214 Select this if you are using Atmel's AT91SAM9260-EK or AT91SAM9XE Evaluation Kit 205 Select this if you are using Atmel's AT91SAM9260-EK or AT91SAM9XE Evaluation Kit
215 <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3933> 206 <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3933>
@@ -270,7 +261,6 @@ comment "AT91SAM9261 Board Type"
270config MACH_AT91SAM9261EK 261config MACH_AT91SAM9261EK
271 bool "Atmel AT91SAM9261-EK Evaluation Kit" 262 bool "Atmel AT91SAM9261-EK Evaluation Kit"
272 select HAVE_AT91_DATAFLASH_CARD 263 select HAVE_AT91_DATAFLASH_CARD
273 select HAVE_NAND_ATMEL_BUSWIDTH_16
274 help 264 help
275 Select this if you are using Atmel's AT91SAM9261-EK Evaluation Kit. 265 Select this if you are using Atmel's AT91SAM9261-EK Evaluation Kit.
276 <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3820> 266 <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3820>
@@ -286,7 +276,6 @@ comment "AT91SAM9G10 Board Type"
286config MACH_AT91SAM9G10EK 276config MACH_AT91SAM9G10EK
287 bool "Atmel AT91SAM9G10-EK Evaluation Kit" 277 bool "Atmel AT91SAM9G10-EK Evaluation Kit"
288 select HAVE_AT91_DATAFLASH_CARD 278 select HAVE_AT91_DATAFLASH_CARD
289 select HAVE_NAND_ATMEL_BUSWIDTH_16
290 help 279 help
291 Select this if you are using Atmel's AT91SAM9G10-EK Evaluation Kit. 280 Select this if you are using Atmel's AT91SAM9G10-EK Evaluation Kit.
292 <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4588> 281 <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4588>
@@ -302,7 +291,6 @@ comment "AT91SAM9263 Board Type"
302config MACH_AT91SAM9263EK 291config MACH_AT91SAM9263EK
303 bool "Atmel AT91SAM9263-EK Evaluation Kit" 292 bool "Atmel AT91SAM9263-EK Evaluation Kit"
304 select HAVE_AT91_DATAFLASH_CARD 293 select HAVE_AT91_DATAFLASH_CARD
305 select HAVE_NAND_ATMEL_BUSWIDTH_16
306 help 294 help
307 Select this if you are using Atmel's AT91SAM9263-EK Evaluation Kit. 295 Select this if you are using Atmel's AT91SAM9263-EK Evaluation Kit.
308 <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4057> 296 <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4057>
@@ -343,7 +331,6 @@ comment "AT91SAM9G20 Board Type"
343config MACH_AT91SAM9G20EK 331config MACH_AT91SAM9G20EK
344 bool "Atmel AT91SAM9G20-EK Evaluation Kit" 332 bool "Atmel AT91SAM9G20-EK Evaluation Kit"
345 select HAVE_AT91_DATAFLASH_CARD 333 select HAVE_AT91_DATAFLASH_CARD
346 select HAVE_NAND_ATMEL_BUSWIDTH_16
347 help 334 help
348 Select this if you are using Atmel's AT91SAM9G20-EK Evaluation Kit 335 Select this if you are using Atmel's AT91SAM9G20-EK Evaluation Kit
349 that embeds only one SD/MMC slot. 336 that embeds only one SD/MMC slot.
@@ -351,7 +338,6 @@ config MACH_AT91SAM9G20EK
351config MACH_AT91SAM9G20EK_2MMC 338config MACH_AT91SAM9G20EK_2MMC
352 depends on MACH_AT91SAM9G20EK 339 depends on MACH_AT91SAM9G20EK
353 bool "Atmel AT91SAM9G20-EK Evaluation Kit with 2 SD/MMC Slots" 340 bool "Atmel AT91SAM9G20-EK Evaluation Kit with 2 SD/MMC Slots"
354 select HAVE_NAND_ATMEL_BUSWIDTH_16
355 help 341 help
356 Select this if you are using an Atmel AT91SAM9G20-EK Evaluation Kit 342 Select this if you are using an Atmel AT91SAM9G20-EK Evaluation Kit
357 with 2 SD/MMC Slots. This is the case for AT91SAM9G20-EK rev. C and 343 with 2 SD/MMC Slots. This is the case for AT91SAM9G20-EK rev. C and
@@ -416,7 +402,6 @@ comment "AT91SAM9G45 Board Type"
416 402
417config MACH_AT91SAM9M10G45EK 403config MACH_AT91SAM9M10G45EK
418 bool "Atmel AT91SAM9M10G45-EK Evaluation Kits" 404 bool "Atmel AT91SAM9M10G45-EK Evaluation Kits"
419 select HAVE_NAND_ATMEL_BUSWIDTH_16
420 help 405 help
421 Select this if you are using Atmel's AT91SAM9G45-EKES Evaluation Kit. 406 Select this if you are using Atmel's AT91SAM9G45-EKES Evaluation Kit.
422 "ES" at the end of the name means that this board is an 407 "ES" at the end of the name means that this board is an
@@ -433,7 +418,6 @@ comment "AT91CAP9 Board Type"
433config MACH_AT91CAP9ADK 418config MACH_AT91CAP9ADK
434 bool "Atmel AT91CAP9A-DK Evaluation Kit" 419 bool "Atmel AT91CAP9A-DK Evaluation Kit"
435 select HAVE_AT91_DATAFLASH_CARD 420 select HAVE_AT91_DATAFLASH_CARD
436 select HAVE_NAND_ATMEL_BUSWIDTH_16
437 help 421 help
438 Select this if you are using Atmel's AT91CAP9A-DK Evaluation Kit. 422 Select this if you are using Atmel's AT91CAP9A-DK Evaluation Kit.
439 <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4138> 423 <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4138>
@@ -442,23 +426,6 @@ endif
442 426
443# ---------------------------------------------------------- 427# ----------------------------------------------------------
444 428
445if ARCH_AT572D940HF
446
447comment "AT572D940HF Board Type"
448
449config MACH_AT572D940HFEB
450 bool "AT572D940HF-EK"
451 depends on ARCH_AT572D940HF
452 select HAVE_AT91_DATAFLASH_CARD
453 select HAVE_NAND_ATMEL_BUSWIDTH_16
454 help
455 Select this if you are using Atmel's AT572D940HF-EK evaluation kit.
456 <http://www.atmel.com/products/diopsis/default.asp>
457
458endif
459
460# ----------------------------------------------------------
461
462if ARCH_AT91X40 429if ARCH_AT91X40
463 430
464comment "AT91X40 Board Type" 431comment "AT91X40 Board Type"
@@ -483,13 +450,6 @@ config MTD_AT91_DATAFLASH_CARD
483 help 450 help
484 Enable support for the DataFlash card. 451 Enable support for the DataFlash card.
485 452
486config MTD_NAND_ATMEL_BUSWIDTH_16
487 bool "Enable 16-bit data bus interface to NAND flash"
488 depends on HAVE_NAND_ATMEL_BUSWIDTH_16
489 help
490 On AT91SAM926x boards both types of NAND flash can be present
491 (8 and 16 bit data bus width).
492
493# ---------------------------------------------------------- 453# ----------------------------------------------------------
494 454
495comment "AT91 Feature Selections" 455comment "AT91 Feature Selections"
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index a83835e0c185..96966231920c 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devi
19obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o at91sam9_alt_reset.o 19obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o at91sam9_alt_reset.o
20obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o 20obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
21obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o 21obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
22obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
23obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o 22obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
24 23
25# AT91RM9200 board-specific support 24# AT91RM9200 board-specific support
@@ -78,9 +77,6 @@ obj-$(CONFIG_MACH_AT91SAM9M10G45EK) += board-sam9m10g45ek.o
78# AT91CAP9 board-specific support 77# AT91CAP9 board-specific support
79obj-$(CONFIG_MACH_AT91CAP9ADK) += board-cap9adk.o 78obj-$(CONFIG_MACH_AT91CAP9ADK) += board-cap9adk.o
80 79
81# AT572D940HF board-specific support
82obj-$(CONFIG_MACH_AT572D940HFEB) += board-at572d940hf_ek.o
83
84# AT91X40 board-specific support 80# AT91X40 board-specific support
85obj-$(CONFIG_MACH_AT91EB01) += board-eb01.o 81obj-$(CONFIG_MACH_AT91EB01) += board-eb01.o
86 82
diff --git a/arch/arm/mach-at91/at572d940hf.c b/arch/arm/mach-at91/at572d940hf.c
deleted file mode 100644
index a6b9c68c003a..000000000000
--- a/arch/arm/mach-at91/at572d940hf.c
+++ /dev/null
@@ -1,377 +0,0 @@
1/*
2 * arch/arm/mach-at91/at572d940hf.c
3 *
4 * Antonio R. Costa <costa.antonior@gmail.com>
5 * Copyright (C) 2008 Atmel
6 *
7 * Copyright (C) 2005 SAN People
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
25#include <linux/module.h>
26
27#include <asm/mach/irq.h>
28#include <asm/mach/arch.h>
29#include <asm/mach/map.h>
30#include <mach/at572d940hf.h>
31#include <mach/at91_pmc.h>
32#include <mach/at91_rstc.h>
33
34#include "generic.h"
35#include "clock.h"
36
37static struct map_desc at572d940hf_io_desc[] __initdata = {
38 {
39 .virtual = AT91_VA_BASE_SYS,
40 .pfn = __phys_to_pfn(AT91_BASE_SYS),
41 .length = SZ_16K,
42 .type = MT_DEVICE,
43 }, {
44 .virtual = AT91_IO_VIRT_BASE - AT572D940HF_SRAM_SIZE,
45 .pfn = __phys_to_pfn(AT572D940HF_SRAM_BASE),
46 .length = AT572D940HF_SRAM_SIZE,
47 .type = MT_DEVICE,
48 },
49};
50
51/* --------------------------------------------------------------------
52 * Clocks
53 * -------------------------------------------------------------------- */
54
55/*
56 * The peripheral clocks.
57 */
58static struct clk pioA_clk = {
59 .name = "pioA_clk",
60 .pmc_mask = 1 << AT572D940HF_ID_PIOA,
61 .type = CLK_TYPE_PERIPHERAL,
62};
63static struct clk pioB_clk = {
64 .name = "pioB_clk",
65 .pmc_mask = 1 << AT572D940HF_ID_PIOB,
66 .type = CLK_TYPE_PERIPHERAL,
67};
68static struct clk pioC_clk = {
69 .name = "pioC_clk",
70 .pmc_mask = 1 << AT572D940HF_ID_PIOC,
71 .type = CLK_TYPE_PERIPHERAL,
72};
73static struct clk macb_clk = {
74 .name = "macb_clk",
75 .pmc_mask = 1 << AT572D940HF_ID_EMAC,
76 .type = CLK_TYPE_PERIPHERAL,
77};
78static struct clk usart0_clk = {
79 .name = "usart0_clk",
80 .pmc_mask = 1 << AT572D940HF_ID_US0,
81 .type = CLK_TYPE_PERIPHERAL,
82};
83static struct clk usart1_clk = {
84 .name = "usart1_clk",
85 .pmc_mask = 1 << AT572D940HF_ID_US1,
86 .type = CLK_TYPE_PERIPHERAL,
87};
88static struct clk usart2_clk = {
89 .name = "usart2_clk",
90 .pmc_mask = 1 << AT572D940HF_ID_US2,
91 .type = CLK_TYPE_PERIPHERAL,
92};
93static struct clk mmc_clk = {
94 .name = "mci_clk",
95 .pmc_mask = 1 << AT572D940HF_ID_MCI,
96 .type = CLK_TYPE_PERIPHERAL,
97};
98static struct clk udc_clk = {
99 .name = "udc_clk",
100 .pmc_mask = 1 << AT572D940HF_ID_UDP,
101 .type = CLK_TYPE_PERIPHERAL,
102};
103static struct clk twi0_clk = {
104 .name = "twi0_clk",
105 .pmc_mask = 1 << AT572D940HF_ID_TWI0,
106 .type = CLK_TYPE_PERIPHERAL,
107};
108static struct clk spi0_clk = {
109 .name = "spi0_clk",
110 .pmc_mask = 1 << AT572D940HF_ID_SPI0,
111 .type = CLK_TYPE_PERIPHERAL,
112};
113static struct clk spi1_clk = {
114 .name = "spi1_clk",
115 .pmc_mask = 1 << AT572D940HF_ID_SPI1,
116 .type = CLK_TYPE_PERIPHERAL,
117};
118static struct clk ssc0_clk = {
119 .name = "ssc0_clk",
120 .pmc_mask = 1 << AT572D940HF_ID_SSC0,
121 .type = CLK_TYPE_PERIPHERAL,
122};
123static struct clk ssc1_clk = {
124 .name = "ssc1_clk",
125 .pmc_mask = 1 << AT572D940HF_ID_SSC1,
126 .type = CLK_TYPE_PERIPHERAL,
127};
128static struct clk ssc2_clk = {
129 .name = "ssc2_clk",
130 .pmc_mask = 1 << AT572D940HF_ID_SSC2,
131 .type = CLK_TYPE_PERIPHERAL,
132};
133static struct clk tc0_clk = {
134 .name = "tc0_clk",
135 .pmc_mask = 1 << AT572D940HF_ID_TC0,
136 .type = CLK_TYPE_PERIPHERAL,
137};
138static struct clk tc1_clk = {
139 .name = "tc1_clk",
140 .pmc_mask = 1 << AT572D940HF_ID_TC1,
141 .type = CLK_TYPE_PERIPHERAL,
142};
143static struct clk tc2_clk = {
144 .name = "tc2_clk",
145 .pmc_mask = 1 << AT572D940HF_ID_TC2,
146 .type = CLK_TYPE_PERIPHERAL,
147};
148static struct clk ohci_clk = {
149 .name = "ohci_clk",
150 .pmc_mask = 1 << AT572D940HF_ID_UHP,
151 .type = CLK_TYPE_PERIPHERAL,
152};
153static struct clk ssc3_clk = {
154 .name = "ssc3_clk",
155 .pmc_mask = 1 << AT572D940HF_ID_SSC3,
156 .type = CLK_TYPE_PERIPHERAL,
157};
158static struct clk twi1_clk = {
159 .name = "twi1_clk",
160 .pmc_mask = 1 << AT572D940HF_ID_TWI1,
161 .type = CLK_TYPE_PERIPHERAL,
162};
163static struct clk can0_clk = {
164 .name = "can0_clk",
165 .pmc_mask = 1 << AT572D940HF_ID_CAN0,
166 .type = CLK_TYPE_PERIPHERAL,
167};
168static struct clk can1_clk = {
169 .name = "can1_clk",
170 .pmc_mask = 1 << AT572D940HF_ID_CAN1,
171 .type = CLK_TYPE_PERIPHERAL,
172};
173static struct clk mAgicV_clk = {
174 .name = "mAgicV_clk",
175 .pmc_mask = 1 << AT572D940HF_ID_MSIRQ0,
176 .type = CLK_TYPE_PERIPHERAL,
177};
178
179
180static struct clk *periph_clocks[] __initdata = {
181 &pioA_clk,
182 &pioB_clk,
183 &pioC_clk,
184 &macb_clk,
185 &usart0_clk,
186 &usart1_clk,
187 &usart2_clk,
188 &mmc_clk,
189 &udc_clk,
190 &twi0_clk,
191 &spi0_clk,
192 &spi1_clk,
193 &ssc0_clk,
194 &ssc1_clk,
195 &ssc2_clk,
196 &tc0_clk,
197 &tc1_clk,
198 &tc2_clk,
199 &ohci_clk,
200 &ssc3_clk,
201 &twi1_clk,
202 &can0_clk,
203 &can1_clk,
204 &mAgicV_clk,
205 /* irq0 .. irq2 */
206};
207
208/*
209 * The five programmable clocks.
210 * You must configure pin multiplexing to bring these signals out.
211 */
212static struct clk pck0 = {
213 .name = "pck0",
214 .pmc_mask = AT91_PMC_PCK0,
215 .type = CLK_TYPE_PROGRAMMABLE,
216 .id = 0,
217};
218static struct clk pck1 = {
219 .name = "pck1",
220 .pmc_mask = AT91_PMC_PCK1,
221 .type = CLK_TYPE_PROGRAMMABLE,
222 .id = 1,
223};
224static struct clk pck2 = {
225 .name = "pck2",
226 .pmc_mask = AT91_PMC_PCK2,
227 .type = CLK_TYPE_PROGRAMMABLE,
228 .id = 2,
229};
230static struct clk pck3 = {
231 .name = "pck3",
232 .pmc_mask = AT91_PMC_PCK3,
233 .type = CLK_TYPE_PROGRAMMABLE,
234 .id = 3,
235};
236
237static struct clk mAgicV_mem_clk = {
238 .name = "mAgicV_mem_clk",
239 .pmc_mask = AT91_PMC_PCK4,
240 .type = CLK_TYPE_PROGRAMMABLE,
241 .id = 4,
242};
243
244/* HClocks */
245static struct clk hck0 = {
246 .name = "hck0",
247 .pmc_mask = AT91_PMC_HCK0,
248 .type = CLK_TYPE_SYSTEM,
249 .id = 0,
250};
251static struct clk hck1 = {
252 .name = "hck1",
253 .pmc_mask = AT91_PMC_HCK1,
254 .type = CLK_TYPE_SYSTEM,
255 .id = 1,
256};
257
258static void __init at572d940hf_register_clocks(void)
259{
260 int i;
261
262 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
263 clk_register(periph_clocks[i]);
264
265 clk_register(&pck0);
266 clk_register(&pck1);
267 clk_register(&pck2);
268 clk_register(&pck3);
269 clk_register(&mAgicV_mem_clk);
270
271 clk_register(&hck0);
272 clk_register(&hck1);
273}
274
275/* --------------------------------------------------------------------
276 * GPIO
277 * -------------------------------------------------------------------- */
278
279static struct at91_gpio_bank at572d940hf_gpio[] = {
280 {
281 .id = AT572D940HF_ID_PIOA,
282 .offset = AT91_PIOA,
283 .clock = &pioA_clk,
284 }, {
285 .id = AT572D940HF_ID_PIOB,
286 .offset = AT91_PIOB,
287 .clock = &pioB_clk,
288 }, {
289 .id = AT572D940HF_ID_PIOC,
290 .offset = AT91_PIOC,
291 .clock = &pioC_clk,
292 }
293};
294
295static void at572d940hf_reset(void)
296{
297 at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
298}
299
300
301/* --------------------------------------------------------------------
302 * AT572D940HF processor initialization
303 * -------------------------------------------------------------------- */
304
305void __init at572d940hf_initialize(unsigned long main_clock)
306{
307 /* Map peripherals */
308 iotable_init(at572d940hf_io_desc, ARRAY_SIZE(at572d940hf_io_desc));
309
310 at91_arch_reset = at572d940hf_reset;
311 at91_extern_irq = (1 << AT572D940HF_ID_IRQ0) | (1 << AT572D940HF_ID_IRQ1)
312 | (1 << AT572D940HF_ID_IRQ2);
313
314 /* Init clock subsystem */
315 at91_clock_init(main_clock);
316
317 /* Register the processor-specific clocks */
318 at572d940hf_register_clocks();
319
320 /* Register GPIO subsystem */
321 at91_gpio_init(at572d940hf_gpio, 3);
322}
323
324/* --------------------------------------------------------------------
325 * Interrupt initialization
326 * -------------------------------------------------------------------- */
327
328/*
329 * The default interrupt priority levels (0 = lowest, 7 = highest).
330 */
331static unsigned int at572d940hf_default_irq_priority[NR_AIC_IRQS] __initdata = {
332 7, /* Advanced Interrupt Controller */
333 7, /* System Peripherals */
334 0, /* Parallel IO Controller A */
335 0, /* Parallel IO Controller B */
336 0, /* Parallel IO Controller C */
337 3, /* Ethernet */
338 6, /* USART 0 */
339 6, /* USART 1 */
340 6, /* USART 2 */
341 0, /* Multimedia Card Interface */
342 4, /* USB Device Port */
343 0, /* Two-Wire Interface 0 */
344 6, /* Serial Peripheral Interface 0 */
345 6, /* Serial Peripheral Interface 1 */
346 5, /* Serial Synchronous Controller 0 */
347 5, /* Serial Synchronous Controller 1 */
348 5, /* Serial Synchronous Controller 2 */
349 0, /* Timer Counter 0 */
350 0, /* Timer Counter 1 */
351 0, /* Timer Counter 2 */
352 3, /* USB Host port */
353 3, /* Serial Synchronous Controller 3 */
354 0, /* Two-Wire Interface 1 */
355 0, /* CAN Controller 0 */
356 0, /* CAN Controller 1 */
357 0, /* mAgicV HALT line */
358 0, /* mAgicV SIRQ0 line */
359 0, /* mAgicV exception line */
360 0, /* mAgicV end of DMA line */
361 0, /* Advanced Interrupt Controller */
362 0, /* Advanced Interrupt Controller */
363 0, /* Advanced Interrupt Controller */
364};
365
366void __init at572d940hf_init_interrupts(unsigned int priority[NR_AIC_IRQS])
367{
368 if (!priority)
369 priority = at572d940hf_default_irq_priority;
370
371 /* Initialize the AIC interrupt controller */
372 at91_aic_init(priority);
373
374 /* Enable GPIO interrupts */
375 at91_gpio_irq_setup();
376}
377
diff --git a/arch/arm/mach-at91/at572d940hf_devices.c b/arch/arm/mach-at91/at572d940hf_devices.c
deleted file mode 100644
index 0fc20a240782..000000000000
--- a/arch/arm/mach-at91/at572d940hf_devices.c
+++ /dev/null
@@ -1,970 +0,0 @@
1/*
2 * arch/arm/mach-at91/at572d940hf_devices.c
3 *
4 * Copyright (C) 2008 Atmel Antonio R. Costa <costa.antonior@gmail.com>
5 * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
6 * Copyright (C) 2005 David Brownell
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <asm/mach/arch.h>
25#include <asm/mach/map.h>
26
27#include <linux/dma-mapping.h>
28#include <linux/platform_device.h>
29
30#include <mach/board.h>
31#include <mach/gpio.h>
32#include <mach/at572d940hf.h>
33#include <mach/at572d940hf_matrix.h>
34#include <mach/at91sam9_smc.h>
35
36#include "generic.h"
37#include "sam9_smc.h"
38
39
40/* --------------------------------------------------------------------
41 * USB Host
42 * -------------------------------------------------------------------- */
43
44#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
45static u64 ohci_dmamask = DMA_BIT_MASK(32);
46static struct at91_usbh_data usbh_data;
47
48static struct resource usbh_resources[] = {
49 [0] = {
50 .start = AT572D940HF_UHP_BASE,
51 .end = AT572D940HF_UHP_BASE + SZ_1M - 1,
52 .flags = IORESOURCE_MEM,
53 },
54 [1] = {
55 .start = AT572D940HF_ID_UHP,
56 .end = AT572D940HF_ID_UHP,
57 .flags = IORESOURCE_IRQ,
58 },
59};
60
61static struct platform_device at572d940hf_usbh_device = {
62 .name = "at91_ohci",
63 .id = -1,
64 .dev = {
65 .dma_mask = &ohci_dmamask,
66 .coherent_dma_mask = DMA_BIT_MASK(32),
67 .platform_data = &usbh_data,
68 },
69 .resource = usbh_resources,
70 .num_resources = ARRAY_SIZE(usbh_resources),
71};
72
73void __init at91_add_device_usbh(struct at91_usbh_data *data)
74{
75 if (!data)
76 return;
77
78 usbh_data = *data;
79 platform_device_register(&at572d940hf_usbh_device);
80
81}
82#else
83void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
84#endif
85
86
87/* --------------------------------------------------------------------
88 * USB Device (Gadget)
89 * -------------------------------------------------------------------- */
90
91#ifdef CONFIG_USB_GADGET_AT91
92static struct at91_udc_data udc_data;
93
94static struct resource udc_resources[] = {
95 [0] = {
96 .start = AT572D940HF_BASE_UDP,
97 .end = AT572D940HF_BASE_UDP + SZ_16K - 1,
98 .flags = IORESOURCE_MEM,
99 },
100 [1] = {
101 .start = AT572D940HF_ID_UDP,
102 .end = AT572D940HF_ID_UDP,
103 .flags = IORESOURCE_IRQ,
104 },
105};
106
107static struct platform_device at572d940hf_udc_device = {
108 .name = "at91_udc",
109 .id = -1,
110 .dev = {
111 .platform_data = &udc_data,
112 },
113 .resource = udc_resources,
114 .num_resources = ARRAY_SIZE(udc_resources),
115};
116
117void __init at91_add_device_udc(struct at91_udc_data *data)
118{
119 if (!data)
120 return;
121
122 if (data->vbus_pin) {
123 at91_set_gpio_input(data->vbus_pin, 0);
124 at91_set_deglitch(data->vbus_pin, 1);
125 }
126
127 /* Pullup pin is handled internally */
128
129 udc_data = *data;
130 platform_device_register(&at572d940hf_udc_device);
131}
132#else
133void __init at91_add_device_udc(struct at91_udc_data *data) {}
134#endif
135
136
137/* --------------------------------------------------------------------
138 * Ethernet
139 * -------------------------------------------------------------------- */
140
141#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
142static u64 eth_dmamask = DMA_BIT_MASK(32);
143static struct at91_eth_data eth_data;
144
145static struct resource eth_resources[] = {
146 [0] = {
147 .start = AT572D940HF_BASE_EMAC,
148 .end = AT572D940HF_BASE_EMAC + SZ_16K - 1,
149 .flags = IORESOURCE_MEM,
150 },
151 [1] = {
152 .start = AT572D940HF_ID_EMAC,
153 .end = AT572D940HF_ID_EMAC,
154 .flags = IORESOURCE_IRQ,
155 },
156};
157
158static struct platform_device at572d940hf_eth_device = {
159 .name = "macb",
160 .id = -1,
161 .dev = {
162 .dma_mask = &eth_dmamask,
163 .coherent_dma_mask = DMA_BIT_MASK(32),
164 .platform_data = &eth_data,
165 },
166 .resource = eth_resources,
167 .num_resources = ARRAY_SIZE(eth_resources),
168};
169
170void __init at91_add_device_eth(struct at91_eth_data *data)
171{
172 if (!data)
173 return;
174
175 if (data->phy_irq_pin) {
176 at91_set_gpio_input(data->phy_irq_pin, 0);
177 at91_set_deglitch(data->phy_irq_pin, 1);
178 }
179
180 /* Only RMII is supported */
181 data->is_rmii = 1;
182
183 /* Pins used for RMII */
184 at91_set_A_periph(AT91_PIN_PA16, 0); /* ETXCK_EREFCK */
185 at91_set_A_periph(AT91_PIN_PA17, 0); /* ERXDV */
186 at91_set_A_periph(AT91_PIN_PA18, 0); /* ERX0 */
187 at91_set_A_periph(AT91_PIN_PA19, 0); /* ERX1 */
188 at91_set_A_periph(AT91_PIN_PA20, 0); /* ERXER */
189 at91_set_A_periph(AT91_PIN_PA23, 0); /* ETXEN */
190 at91_set_A_periph(AT91_PIN_PA21, 0); /* ETX0 */
191 at91_set_A_periph(AT91_PIN_PA22, 0); /* ETX1 */
192 at91_set_A_periph(AT91_PIN_PA13, 0); /* EMDIO */
193 at91_set_A_periph(AT91_PIN_PA14, 0); /* EMDC */
194
195 eth_data = *data;
196 platform_device_register(&at572d940hf_eth_device);
197}
198#else
199void __init at91_add_device_eth(struct at91_eth_data *data) {}
200#endif
201
202
203/* --------------------------------------------------------------------
204 * MMC / SD
205 * -------------------------------------------------------------------- */
206
207#if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE)
208static u64 mmc_dmamask = DMA_BIT_MASK(32);
209static struct at91_mmc_data mmc_data;
210
211static struct resource mmc_resources[] = {
212 [0] = {
213 .start = AT572D940HF_BASE_MCI,
214 .end = AT572D940HF_BASE_MCI + SZ_16K - 1,
215 .flags = IORESOURCE_MEM,
216 },
217 [1] = {
218 .start = AT572D940HF_ID_MCI,
219 .end = AT572D940HF_ID_MCI,
220 .flags = IORESOURCE_IRQ,
221 },
222};
223
224static struct platform_device at572d940hf_mmc_device = {
225 .name = "at91_mci",
226 .id = -1,
227 .dev = {
228 .dma_mask = &mmc_dmamask,
229 .coherent_dma_mask = DMA_BIT_MASK(32),
230 .platform_data = &mmc_data,
231 },
232 .resource = mmc_resources,
233 .num_resources = ARRAY_SIZE(mmc_resources),
234};
235
236void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
237{
238 if (!data)
239 return;
240
241 /* input/irq */
242 if (data->det_pin) {
243 at91_set_gpio_input(data->det_pin, 1);
244 at91_set_deglitch(data->det_pin, 1);
245 }
246 if (data->wp_pin)
247 at91_set_gpio_input(data->wp_pin, 1);
248 if (data->vcc_pin)
249 at91_set_gpio_output(data->vcc_pin, 0);
250
251 /* CLK */
252 at91_set_A_periph(AT91_PIN_PC22, 0);
253
254 /* CMD */
255 at91_set_A_periph(AT91_PIN_PC23, 1);
256
257 /* DAT0, maybe DAT1..DAT3 */
258 at91_set_A_periph(AT91_PIN_PC24, 1);
259 if (data->wire4) {
260 at91_set_A_periph(AT91_PIN_PC25, 1);
261 at91_set_A_periph(AT91_PIN_PC26, 1);
262 at91_set_A_periph(AT91_PIN_PC27, 1);
263 }
264
265 mmc_data = *data;
266 platform_device_register(&at572d940hf_mmc_device);
267}
268#else
269void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
270#endif
271
272
273/* --------------------------------------------------------------------
274 * NAND / SmartMedia
275 * -------------------------------------------------------------------- */
276
277#if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE)
278static struct atmel_nand_data nand_data;
279
280#define NAND_BASE AT91_CHIPSELECT_3
281
282static struct resource nand_resources[] = {
283 {
284 .start = NAND_BASE,
285 .end = NAND_BASE + SZ_256M - 1,
286 .flags = IORESOURCE_MEM,
287 }
288};
289
290static struct platform_device at572d940hf_nand_device = {
291 .name = "atmel_nand",
292 .id = -1,
293 .dev = {
294 .platform_data = &nand_data,
295 },
296 .resource = nand_resources,
297 .num_resources = ARRAY_SIZE(nand_resources),
298};
299
300void __init at91_add_device_nand(struct atmel_nand_data *data)
301{
302 unsigned long csa;
303
304 if (!data)
305 return;
306
307 csa = at91_sys_read(AT91_MATRIX_EBICSA);
308 at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA);
309
310 /* enable pin */
311 if (data->enable_pin)
312 at91_set_gpio_output(data->enable_pin, 1);
313
314 /* ready/busy pin */
315 if (data->rdy_pin)
316 at91_set_gpio_input(data->rdy_pin, 1);
317
318 /* card detect pin */
319 if (data->det_pin)
320 at91_set_gpio_input(data->det_pin, 1);
321
322 at91_set_A_periph(AT91_PIN_PB28, 0); /* A[22] */
323 at91_set_B_periph(AT91_PIN_PA28, 0); /* NANDOE */
324 at91_set_B_periph(AT91_PIN_PA29, 0); /* NANDWE */
325
326 nand_data = *data;
327 platform_device_register(&at572d940hf_nand_device);
328}
329
330#else
331void __init at91_add_device_nand(struct atmel_nand_data *data) {}
332#endif
333
334
335/* --------------------------------------------------------------------
336 * TWI (i2c)
337 * -------------------------------------------------------------------- */
338
339/*
340 * Prefer the GPIO code since the TWI controller isn't robust
341 * (gets overruns and underruns under load) and can only issue
342 * repeated STARTs in one scenario (the driver doesn't yet handle them).
343 */
344
345#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
346
347static struct i2c_gpio_platform_data pdata = {
348 .sda_pin = AT91_PIN_PC7,
349 .sda_is_open_drain = 1,
350 .scl_pin = AT91_PIN_PC8,
351 .scl_is_open_drain = 1,
352 .udelay = 2, /* ~100 kHz */
353};
354
355static struct platform_device at572d940hf_twi_device = {
356 .name = "i2c-gpio",
357 .id = -1,
358 .dev.platform_data = &pdata,
359};
360
361void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
362{
363 at91_set_GPIO_periph(AT91_PIN_PC7, 1); /* TWD (SDA) */
364 at91_set_multi_drive(AT91_PIN_PC7, 1);
365
366 at91_set_GPIO_periph(AT91_PIN_PA8, 1); /* TWCK (SCL) */
367 at91_set_multi_drive(AT91_PIN_PC8, 1);
368
369 i2c_register_board_info(0, devices, nr_devices);
370 platform_device_register(&at572d940hf_twi_device);
371}
372
373#elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)
374
375static struct resource twi0_resources[] = {
376 [0] = {
377 .start = AT572D940HF_BASE_TWI0,
378 .end = AT572D940HF_BASE_TWI0 + SZ_16K - 1,
379 .flags = IORESOURCE_MEM,
380 },
381 [1] = {
382 .start = AT572D940HF_ID_TWI0,
383 .end = AT572D940HF_ID_TWI0,
384 .flags = IORESOURCE_IRQ,
385 },
386};
387
388static struct platform_device at572d940hf_twi0_device = {
389 .name = "at91_i2c",
390 .id = 0,
391 .resource = twi0_resources,
392 .num_resources = ARRAY_SIZE(twi0_resources),
393};
394
395static struct resource twi1_resources[] = {
396 [0] = {
397 .start = AT572D940HF_BASE_TWI1,
398 .end = AT572D940HF_BASE_TWI1 + SZ_16K - 1,
399 .flags = IORESOURCE_MEM,
400 },
401 [1] = {
402 .start = AT572D940HF_ID_TWI1,
403 .end = AT572D940HF_ID_TWI1,
404 .flags = IORESOURCE_IRQ,
405 },
406};
407
408static struct platform_device at572d940hf_twi1_device = {
409 .name = "at91_i2c",
410 .id = 1,
411 .resource = twi1_resources,
412 .num_resources = ARRAY_SIZE(twi1_resources),
413};
414
415void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
416{
417 /* pins used for TWI0 interface */
418 at91_set_A_periph(AT91_PIN_PC7, 0); /* TWD */
419 at91_set_multi_drive(AT91_PIN_PC7, 1);
420
421 at91_set_A_periph(AT91_PIN_PC8, 0); /* TWCK */
422 at91_set_multi_drive(AT91_PIN_PC8, 1);
423
424 /* pins used for TWI1 interface */
425 at91_set_A_periph(AT91_PIN_PC20, 0); /* TWD */
426 at91_set_multi_drive(AT91_PIN_PC20, 1);
427
428 at91_set_A_periph(AT91_PIN_PC21, 0); /* TWCK */
429 at91_set_multi_drive(AT91_PIN_PC21, 1);
430
431 i2c_register_board_info(0, devices, nr_devices);
432 platform_device_register(&at572d940hf_twi0_device);
433 platform_device_register(&at572d940hf_twi1_device);
434}
435#else
436void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {}
437#endif
438
439
440/* --------------------------------------------------------------------
441 * SPI
442 * -------------------------------------------------------------------- */
443
444#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
445static u64 spi_dmamask = DMA_BIT_MASK(32);
446
447static struct resource spi0_resources[] = {
448 [0] = {
449 .start = AT572D940HF_BASE_SPI0,
450 .end = AT572D940HF_BASE_SPI0 + SZ_16K - 1,
451 .flags = IORESOURCE_MEM,
452 },
453 [1] = {
454 .start = AT572D940HF_ID_SPI0,
455 .end = AT572D940HF_ID_SPI0,
456 .flags = IORESOURCE_IRQ,
457 },
458};
459
460static struct platform_device at572d940hf_spi0_device = {
461 .name = "atmel_spi",
462 .id = 0,
463 .dev = {
464 .dma_mask = &spi_dmamask,
465 .coherent_dma_mask = DMA_BIT_MASK(32),
466 },
467 .resource = spi0_resources,
468 .num_resources = ARRAY_SIZE(spi0_resources),
469};
470
471static const unsigned spi0_standard_cs[4] = { AT91_PIN_PA3, AT91_PIN_PA4, AT91_PIN_PA5, AT91_PIN_PA6 };
472
473static struct resource spi1_resources[] = {
474 [0] = {
475 .start = AT572D940HF_BASE_SPI1,
476 .end = AT572D940HF_BASE_SPI1 + SZ_16K - 1,
477 .flags = IORESOURCE_MEM,
478 },
479 [1] = {
480 .start = AT572D940HF_ID_SPI1,
481 .end = AT572D940HF_ID_SPI1,
482 .flags = IORESOURCE_IRQ,
483 },
484};
485
486static struct platform_device at572d940hf_spi1_device = {
487 .name = "atmel_spi",
488 .id = 1,
489 .dev = {
490 .dma_mask = &spi_dmamask,
491 .coherent_dma_mask = DMA_BIT_MASK(32),
492 },
493 .resource = spi1_resources,
494 .num_resources = ARRAY_SIZE(spi1_resources),
495};
496
497static const unsigned spi1_standard_cs[4] = { AT91_PIN_PC3, AT91_PIN_PC4, AT91_PIN_PC5, AT91_PIN_PC6 };
498
499void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
500{
501 int i;
502 unsigned long cs_pin;
503 short enable_spi0 = 0;
504 short enable_spi1 = 0;
505
506 /* Choose SPI chip-selects */
507 for (i = 0; i < nr_devices; i++) {
508 if (devices[i].controller_data)
509 cs_pin = (unsigned long) devices[i].controller_data;
510 else if (devices[i].bus_num == 0)
511 cs_pin = spi0_standard_cs[devices[i].chip_select];
512 else
513 cs_pin = spi1_standard_cs[devices[i].chip_select];
514
515 if (devices[i].bus_num == 0)
516 enable_spi0 = 1;
517 else
518 enable_spi1 = 1;
519
520 /* enable chip-select pin */
521 at91_set_gpio_output(cs_pin, 1);
522
523 /* pass chip-select pin to driver */
524 devices[i].controller_data = (void *) cs_pin;
525 }
526
527 spi_register_board_info(devices, nr_devices);
528
529 /* Configure SPI bus(es) */
530 if (enable_spi0) {
531 at91_set_A_periph(AT91_PIN_PA0, 0); /* SPI0_MISO */
532 at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
533 at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
534
535 at91_clock_associate("spi0_clk", &at572d940hf_spi0_device.dev, "spi_clk");
536 platform_device_register(&at572d940hf_spi0_device);
537 }
538 if (enable_spi1) {
539 at91_set_A_periph(AT91_PIN_PC0, 0); /* SPI1_MISO */
540 at91_set_A_periph(AT91_PIN_PC1, 0); /* SPI1_MOSI */
541 at91_set_A_periph(AT91_PIN_PC2, 0); /* SPI1_SPCK */
542
543 at91_clock_associate("spi1_clk", &at572d940hf_spi1_device.dev, "spi_clk");
544 platform_device_register(&at572d940hf_spi1_device);
545 }
546}
547#else
548void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
549#endif
550
551
552/* --------------------------------------------------------------------
553 * Timer/Counter blocks
554 * -------------------------------------------------------------------- */
555
556#ifdef CONFIG_ATMEL_TCLIB
557
558static struct resource tcb_resources[] = {
559 [0] = {
560 .start = AT572D940HF_BASE_TCB,
561 .end = AT572D940HF_BASE_TCB + SZ_16K - 1,
562 .flags = IORESOURCE_MEM,
563 },
564 [1] = {
565 .start = AT572D940HF_ID_TC0,
566 .end = AT572D940HF_ID_TC0,
567 .flags = IORESOURCE_IRQ,
568 },
569 [2] = {
570 .start = AT572D940HF_ID_TC1,
571 .end = AT572D940HF_ID_TC1,
572 .flags = IORESOURCE_IRQ,
573 },
574 [3] = {
575 .start = AT572D940HF_ID_TC2,
576 .end = AT572D940HF_ID_TC2,
577 .flags = IORESOURCE_IRQ,
578 },
579};
580
581static struct platform_device at572d940hf_tcb_device = {
582 .name = "atmel_tcb",
583 .id = 0,
584 .resource = tcb_resources,
585 .num_resources = ARRAY_SIZE(tcb_resources),
586};
587
588static void __init at91_add_device_tc(void)
589{
590 /* this chip has a separate clock and irq for each TC channel */
591 at91_clock_associate("tc0_clk", &at572d940hf_tcb_device.dev, "t0_clk");
592 at91_clock_associate("tc1_clk", &at572d940hf_tcb_device.dev, "t1_clk");
593 at91_clock_associate("tc2_clk", &at572d940hf_tcb_device.dev, "t2_clk");
594 platform_device_register(&at572d940hf_tcb_device);
595}
596#else
597static void __init at91_add_device_tc(void) { }
598#endif
599
600
601/* --------------------------------------------------------------------
602 * RTT
603 * -------------------------------------------------------------------- */
604
605static struct resource rtt_resources[] = {
606 {
607 .start = AT91_BASE_SYS + AT91_RTT,
608 .end = AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
609 .flags = IORESOURCE_MEM,
610 }
611};
612
613static struct platform_device at572d940hf_rtt_device = {
614 .name = "at91_rtt",
615 .id = 0,
616 .resource = rtt_resources,
617 .num_resources = ARRAY_SIZE(rtt_resources),
618};
619
620static void __init at91_add_device_rtt(void)
621{
622 platform_device_register(&at572d940hf_rtt_device);
623}
624
625
626/* --------------------------------------------------------------------
627 * Watchdog
628 * -------------------------------------------------------------------- */
629
630#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
631static struct platform_device at572d940hf_wdt_device = {
632 .name = "at91_wdt",
633 .id = -1,
634 .num_resources = 0,
635};
636
637static void __init at91_add_device_watchdog(void)
638{
639 platform_device_register(&at572d940hf_wdt_device);
640}
641#else
642static void __init at91_add_device_watchdog(void) {}
643#endif
644
645
646/* --------------------------------------------------------------------
647 * UART
648 * -------------------------------------------------------------------- */
649
650#if defined(CONFIG_SERIAL_ATMEL)
651static struct resource dbgu_resources[] = {
652 [0] = {
653 .start = AT91_VA_BASE_SYS + AT91_DBGU,
654 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
655 .flags = IORESOURCE_MEM,
656 },
657 [1] = {
658 .start = AT91_ID_SYS,
659 .end = AT91_ID_SYS,
660 .flags = IORESOURCE_IRQ,
661 },
662};
663
664static struct atmel_uart_data dbgu_data = {
665 .use_dma_tx = 0,
666 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
667 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
668};
669
670static u64 dbgu_dmamask = DMA_BIT_MASK(32);
671
672static struct platform_device at572d940hf_dbgu_device = {
673 .name = "atmel_usart",
674 .id = 0,
675 .dev = {
676 .dma_mask = &dbgu_dmamask,
677 .coherent_dma_mask = DMA_BIT_MASK(32),
678 .platform_data = &dbgu_data,
679 },
680 .resource = dbgu_resources,
681 .num_resources = ARRAY_SIZE(dbgu_resources),
682};
683
684static inline void configure_dbgu_pins(void)
685{
686 at91_set_A_periph(AT91_PIN_PC31, 1); /* DTXD */
687 at91_set_A_periph(AT91_PIN_PC30, 0); /* DRXD */
688}
689
690static struct resource uart0_resources[] = {
691 [0] = {
692 .start = AT572D940HF_BASE_US0,
693 .end = AT572D940HF_BASE_US0 + SZ_16K - 1,
694 .flags = IORESOURCE_MEM,
695 },
696 [1] = {
697 .start = AT572D940HF_ID_US0,
698 .end = AT572D940HF_ID_US0,
699 .flags = IORESOURCE_IRQ,
700 },
701};
702
703static struct atmel_uart_data uart0_data = {
704 .use_dma_tx = 1,
705 .use_dma_rx = 1,
706};
707
708static u64 uart0_dmamask = DMA_BIT_MASK(32);
709
710static struct platform_device at572d940hf_uart0_device = {
711 .name = "atmel_usart",
712 .id = 1,
713 .dev = {
714 .dma_mask = &uart0_dmamask,
715 .coherent_dma_mask = DMA_BIT_MASK(32),
716 .platform_data = &uart0_data,
717 },
718 .resource = uart0_resources,
719 .num_resources = ARRAY_SIZE(uart0_resources),
720};
721
722static inline void configure_usart0_pins(unsigned pins)
723{
724 at91_set_A_periph(AT91_PIN_PA8, 1); /* TXD0 */
725 at91_set_A_periph(AT91_PIN_PA7, 0); /* RXD0 */
726
727 if (pins & ATMEL_UART_RTS)
728 at91_set_A_periph(AT91_PIN_PA10, 0); /* RTS0 */
729 if (pins & ATMEL_UART_CTS)
730 at91_set_A_periph(AT91_PIN_PA9, 0); /* CTS0 */
731}
732
733static struct resource uart1_resources[] = {
734 [0] = {
735 .start = AT572D940HF_BASE_US1,
736 .end = AT572D940HF_BASE_US1 + SZ_16K - 1,
737 .flags = IORESOURCE_MEM,
738 },
739 [1] = {
740 .start = AT572D940HF_ID_US1,
741 .end = AT572D940HF_ID_US1,
742 .flags = IORESOURCE_IRQ,
743 },
744};
745
746static struct atmel_uart_data uart1_data = {
747 .use_dma_tx = 1,
748 .use_dma_rx = 1,
749};
750
751static u64 uart1_dmamask = DMA_BIT_MASK(32);
752
753static struct platform_device at572d940hf_uart1_device = {
754 .name = "atmel_usart",
755 .id = 2,
756 .dev = {
757 .dma_mask = &uart1_dmamask,
758 .coherent_dma_mask = DMA_BIT_MASK(32),
759 .platform_data = &uart1_data,
760 },
761 .resource = uart1_resources,
762 .num_resources = ARRAY_SIZE(uart1_resources),
763};
764
765static inline void configure_usart1_pins(unsigned pins)
766{
767 at91_set_A_periph(AT91_PIN_PC10, 1); /* TXD1 */
768 at91_set_A_periph(AT91_PIN_PC9 , 0); /* RXD1 */
769
770 if (pins & ATMEL_UART_RTS)
771 at91_set_A_periph(AT91_PIN_PC12, 0); /* RTS1 */
772 if (pins & ATMEL_UART_CTS)
773 at91_set_A_periph(AT91_PIN_PC11, 0); /* CTS1 */
774}
775
776static struct resource uart2_resources[] = {
777 [0] = {
778 .start = AT572D940HF_BASE_US2,
779 .end = AT572D940HF_BASE_US2 + SZ_16K - 1,
780 .flags = IORESOURCE_MEM,
781 },
782 [1] = {
783 .start = AT572D940HF_ID_US2,
784 .end = AT572D940HF_ID_US2,
785 .flags = IORESOURCE_IRQ,
786 },
787};
788
789static struct atmel_uart_data uart2_data = {
790 .use_dma_tx = 1,
791 .use_dma_rx = 1,
792};
793
794static u64 uart2_dmamask = DMA_BIT_MASK(32);
795
796static struct platform_device at572d940hf_uart2_device = {
797 .name = "atmel_usart",
798 .id = 3,
799 .dev = {
800 .dma_mask = &uart2_dmamask,
801 .coherent_dma_mask = DMA_BIT_MASK(32),
802 .platform_data = &uart2_data,
803 },
804 .resource = uart2_resources,
805 .num_resources = ARRAY_SIZE(uart2_resources),
806};
807
808static inline void configure_usart2_pins(unsigned pins)
809{
810 at91_set_A_periph(AT91_PIN_PC15, 1); /* TXD2 */
811 at91_set_A_periph(AT91_PIN_PC14, 0); /* RXD2 */
812
813 if (pins & ATMEL_UART_RTS)
814 at91_set_A_periph(AT91_PIN_PC17, 0); /* RTS2 */
815 if (pins & ATMEL_UART_CTS)
816 at91_set_A_periph(AT91_PIN_PC16, 0); /* CTS2 */
817}
818
819static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */
820struct platform_device *atmel_default_console_device; /* the serial console device */
821
822void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
823{
824 struct platform_device *pdev;
825
826 switch (id) {
827 case 0: /* DBGU */
828 pdev = &at572d940hf_dbgu_device;
829 configure_dbgu_pins();
830 at91_clock_associate("mck", &pdev->dev, "usart");
831 break;
832 case AT572D940HF_ID_US0:
833 pdev = &at572d940hf_uart0_device;
834 configure_usart0_pins(pins);
835 at91_clock_associate("usart0_clk", &pdev->dev, "usart");
836 break;
837 case AT572D940HF_ID_US1:
838 pdev = &at572d940hf_uart1_device;
839 configure_usart1_pins(pins);
840 at91_clock_associate("usart1_clk", &pdev->dev, "usart");
841 break;
842 case AT572D940HF_ID_US2:
843 pdev = &at572d940hf_uart2_device;
844 configure_usart2_pins(pins);
845 at91_clock_associate("usart2_clk", &pdev->dev, "usart");
846 break;
847 default:
848 return;
849 }
850 pdev->id = portnr; /* update to mapped ID */
851
852 if (portnr < ATMEL_MAX_UART)
853 at91_uarts[portnr] = pdev;
854}
855
856void __init at91_set_serial_console(unsigned portnr)
857{
858 if (portnr < ATMEL_MAX_UART)
859 atmel_default_console_device = at91_uarts[portnr];
860}
861
862void __init at91_add_device_serial(void)
863{
864 int i;
865
866 for (i = 0; i < ATMEL_MAX_UART; i++) {
867 if (at91_uarts[i])
868 platform_device_register(at91_uarts[i]);
869 }
870
871 if (!atmel_default_console_device)
872 printk(KERN_INFO "AT91: No default serial console defined.\n");
873}
874
875#else
876void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
877void __init at91_set_serial_console(unsigned portnr) {}
878void __init at91_add_device_serial(void) {}
879#endif
880
881
882/* --------------------------------------------------------------------
883 * mAgic
884 * -------------------------------------------------------------------- */
885
886#ifdef CONFIG_MAGICV
887static struct resource mAgic_resources[] = {
888 {
889 .start = AT91_MAGIC_PM_BASE,
890 .end = AT91_MAGIC_PM_BASE + AT91_MAGIC_PM_SIZE - 1,
891 .flags = IORESOURCE_MEM,
892 },
893 {
894 .start = AT91_MAGIC_DM_I_BASE,
895 .end = AT91_MAGIC_DM_I_BASE + AT91_MAGIC_DM_I_SIZE - 1,
896 .flags = IORESOURCE_MEM,
897 },
898 {
899 .start = AT91_MAGIC_DM_F_BASE,
900 .end = AT91_MAGIC_DM_F_BASE + AT91_MAGIC_DM_F_SIZE - 1,
901 .flags = IORESOURCE_MEM,
902 },
903 {
904 .start = AT91_MAGIC_DM_DB_BASE,
905 .end = AT91_MAGIC_DM_DB_BASE + AT91_MAGIC_DM_DB_SIZE - 1,
906 .flags = IORESOURCE_MEM,
907 },
908 {
909 .start = AT91_MAGIC_REGS_BASE,
910 .end = AT91_MAGIC_REGS_BASE + AT91_MAGIC_REGS_SIZE - 1,
911 .flags = IORESOURCE_MEM,
912 },
913 {
914 .start = AT91_MAGIC_EXTPAGE_BASE,
915 .end = AT91_MAGIC_EXTPAGE_BASE + AT91_MAGIC_EXTPAGE_SIZE - 1,
916 .flags = IORESOURCE_MEM,
917 },
918 {
919 .start = AT572D940HF_ID_MSIRQ0,
920 .end = AT572D940HF_ID_MSIRQ0,
921 .flags = IORESOURCE_IRQ,
922 },
923 {
924 .start = AT572D940HF_ID_MHALT,
925 .end = AT572D940HF_ID_MHALT,
926 .flags = IORESOURCE_IRQ,
927 },
928 {
929 .start = AT572D940HF_ID_MEXC,
930 .end = AT572D940HF_ID_MEXC,
931 .flags = IORESOURCE_IRQ,
932 },
933 {
934 .start = AT572D940HF_ID_MEDMA,
935 .end = AT572D940HF_ID_MEDMA,
936 .flags = IORESOURCE_IRQ,
937 },
938};
939
940static struct platform_device mAgic_device = {
941 .name = "mAgic",
942 .id = -1,
943 .num_resources = ARRAY_SIZE(mAgic_resources),
944 .resource = mAgic_resources,
945};
946
947void __init at91_add_device_mAgic(void)
948{
949 platform_device_register(&mAgic_device);
950}
951#else
952void __init at91_add_device_mAgic(void) {}
953#endif
954
955
956/* -------------------------------------------------------------------- */
957
958/*
959 * These devices are always present and don't need any board-specific
960 * setup.
961 */
962static int __init at91_add_standard_devices(void)
963{
964 at91_add_device_rtt();
965 at91_add_device_watchdog();
966 at91_add_device_tc();
967 return 0;
968}
969
970arch_initcall(at91_add_standard_devices);
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c
index 73376170fb91..17fae4a42ab5 100644
--- a/arch/arm/mach-at91/at91cap9.c
+++ b/arch/arm/mach-at91/at91cap9.c
@@ -222,6 +222,25 @@ static struct clk *periph_clocks[] __initdata = {
222 // irq0 .. irq1
223};
224
225static struct clk_lookup periph_clocks_lookups[] = {
226 CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
227 CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
228 CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
229 CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
230 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
231 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
232 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
233 CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
234 CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
235};
236
237static struct clk_lookup usart_clocks_lookups[] = {
238 CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
239 CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
240 CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
241 CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
242};
243
225/*
226 * The four programmable clocks.
227 * You must configure pin multiplexing to bring these signals out.
@@ -258,12 +277,29 @@ static void __init at91cap9_register_clocks(void)
258 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
259 clk_register(periph_clocks[i]);
260
280 clkdev_add_table(periph_clocks_lookups,
281 ARRAY_SIZE(periph_clocks_lookups));
282 clkdev_add_table(usart_clocks_lookups,
283 ARRAY_SIZE(usart_clocks_lookups));
284
261 clk_register(&pck0);
262 clk_register(&pck1);
263 clk_register(&pck2);
264 clk_register(&pck3);
265}
266
291static struct clk_lookup console_clock_lookup;
292
293void __init at91cap9_set_console_clock(int id)
294{
295 if (id >= ARRAY_SIZE(usart_clocks_lookups))
296 return;
297
298 console_clock_lookup.con_id = "usart";
299 console_clock_lookup.clk = usart_clocks_lookups[id].clk;
300 clkdev_add(&console_clock_lookup);
301}
302
267/* --------------------------------------------------------------------
268 * GPIO
269 * -------------------------------------------------------------------- */
@@ -303,11 +339,14 @@ static void at91cap9_poweroff(void)
303 * AT91CAP9 processor initialization
304 * -------------------------------------------------------------------- */
305
306void __init at91cap9_initialize(unsigned long main_clock)
342void __init at91cap9_map_io(void)
307{
308 /* Map peripherals */
309 iotable_init(at91cap9_io_desc, ARRAY_SIZE(at91cap9_io_desc));
346}
310
348void __init at91cap9_initialize(unsigned long main_clock)
349{
311 at91_arch_reset = at91cap9_reset;
312 pm_power_off = at91cap9_poweroff;
313 at91_extern_irq = (1 << AT91CAP9_ID_IRQ0) | (1 << AT91CAP9_ID_IRQ1);
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c
index 21020ceb2f3a..cd850ed6f335 100644
--- a/arch/arm/mach-at91/at91cap9_devices.c
+++ b/arch/arm/mach-at91/at91cap9_devices.c
@@ -181,10 +181,6 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
181
182 /* Pullup pin is handled internally by USB device peripheral */
183
184 /* Clocks */
185 at91_clock_associate("utmi_clk", &at91_usba_udc_device.dev, "hclk");
186 at91_clock_associate("udphs_clk", &at91_usba_udc_device.dev, "pclk");
187
188 platform_device_register(&at91_usba_udc_device);
189}
190#else
@@ -355,7 +351,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
355 }
356
357 mmc0_data = *data;
358 at91_clock_associate("mci0_clk", &at91cap9_mmc0_device.dev, "mci_clk");
359 platform_device_register(&at91cap9_mmc0_device);
360 } else { /* MCI1 */
361 /* CLK */
@@ -373,7 +368,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
373 }
374
375 mmc1_data = *data;
376 at91_clock_associate("mci1_clk", &at91cap9_mmc1_device.dev, "mci_clk");
377 platform_device_register(&at91cap9_mmc1_device);
378 }
379}
@@ -614,7 +608,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
614 at91_set_B_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
615 at91_set_B_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
616
617 at91_clock_associate("spi0_clk", &at91cap9_spi0_device.dev, "spi_clk");
618 platform_device_register(&at91cap9_spi0_device);
619 }
620 if (enable_spi1) {
@@ -622,7 +615,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
622 at91_set_A_periph(AT91_PIN_PB13, 0); /* SPI1_MOSI */
623 at91_set_A_periph(AT91_PIN_PB14, 0); /* SPI1_SPCK */
624
625 at91_clock_associate("spi1_clk", &at91cap9_spi1_device.dev, "spi_clk");
626 platform_device_register(&at91cap9_spi1_device);
627 }
628}
@@ -659,8 +651,6 @@ static struct platform_device at91cap9_tcb_device = {
659
660static void __init at91_add_device_tc(void)
661{
662 /* this chip has one clock and irq for all three TC channels */
663 at91_clock_associate("tcb_clk", &at91cap9_tcb_device.dev, "t0_clk");
664 platform_device_register(&at91cap9_tcb_device);
665}
666#else
@@ -1001,12 +991,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
1001 case AT91CAP9_ID_SSC0:
1002 pdev = &at91cap9_ssc0_device;
1003 configure_ssc0_pins(pins);
1004 at91_clock_associate("ssc0_clk", &pdev->dev, "ssc");
1005 break;
1006 case AT91CAP9_ID_SSC1:
1007 pdev = &at91cap9_ssc1_device;
1008 configure_ssc1_pins(pins);
1009 at91_clock_associate("ssc1_clk", &pdev->dev, "ssc");
1010 break;
1011 default:
1012 return;
@@ -1199,32 +1187,30 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
1199void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1200{
1201 struct platform_device *pdev;
1190 struct atmel_uart_data *pdata;
1202
1203 switch (id) {
1204 case 0: /* DBGU */
1205 pdev = &at91cap9_dbgu_device;
1206 configure_dbgu_pins();
1207 at91_clock_associate("mck", &pdev->dev, "usart");
1208 break;
1209 case AT91CAP9_ID_US0:
1210 pdev = &at91cap9_uart0_device;
1211 configure_usart0_pins(pins);
1212 at91_clock_associate("usart0_clk", &pdev->dev, "usart");
1213 break;
1214 case AT91CAP9_ID_US1:
1215 pdev = &at91cap9_uart1_device;
1216 configure_usart1_pins(pins);
1217 at91_clock_associate("usart1_clk", &pdev->dev, "usart");
1218 break;
1219 case AT91CAP9_ID_US2:
1220 pdev = &at91cap9_uart2_device;
1221 configure_usart2_pins(pins);
1222 at91_clock_associate("usart2_clk", &pdev->dev, "usart");
1223 break;
1224 default:
1225 return;
1226 }
1227 pdev->id = portnr; /* update to mapped ID */
1212 pdata = pdev->dev.platform_data;
1213 pdata->num = portnr; /* update to mapped ID */
1228
1229 if (portnr < ATMEL_MAX_UART)
1230 at91_uarts[portnr] = pdev;
@@ -1232,8 +1218,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1232
1233void __init at91_set_serial_console(unsigned portnr)
1234{
1235 if (portnr < ATMEL_MAX_UART)
1221 if (portnr < ATMEL_MAX_UART) {
1236 atmel_default_console_device = at91_uarts[portnr];
1223 at91cap9_set_console_clock(portnr);
1224 }
1237}
1238
1239void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 2e9ecad97f3d..b228ce9e21a1 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -18,6 +18,7 @@
18#include <mach/at91rm9200.h>
19#include <mach/at91_pmc.h>
20#include <mach/at91_st.h>
21#include <mach/cpu.h>
21
22#include "generic.h"
23#include "clock.h"
@@ -191,6 +192,26 @@ static struct clk *periph_clocks[] __initdata = {
191 // irq0 .. irq6
192};
193
195static struct clk_lookup periph_clocks_lookups[] = {
196 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
197 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
198 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
199 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
200 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
201 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
202 CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
203 CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
204 CLKDEV_CON_DEV_ID("ssc", "ssc.2", &ssc2_clk),
205};
206
207static struct clk_lookup usart_clocks_lookups[] = {
208 CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
209 CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
210 CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
211 CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
212 CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
213};
214
194/*
195 * The four programmable clocks.
196 * You must configure pin multiplexing to bring these signals out.
@@ -227,12 +248,29 @@ static void __init at91rm9200_register_clocks(void)
227 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
228 clk_register(periph_clocks[i]);
229
251 clkdev_add_table(periph_clocks_lookups,
252 ARRAY_SIZE(periph_clocks_lookups));
253 clkdev_add_table(usart_clocks_lookups,
254 ARRAY_SIZE(usart_clocks_lookups));
255
230 clk_register(&pck0);
231 clk_register(&pck1);
232 clk_register(&pck2);
233 clk_register(&pck3);
234}
235
262static struct clk_lookup console_clock_lookup;
263
264void __init at91rm9200_set_console_clock(int id)
265{
266 if (id >= ARRAY_SIZE(usart_clocks_lookups))
267 return;
268
269 console_clock_lookup.con_id = "usart";
270 console_clock_lookup.clk = usart_clocks_lookups[id].clk;
271 clkdev_add(&console_clock_lookup);
272}
273
236/* --------------------------------------------------------------------
237 * GPIO
238 * -------------------------------------------------------------------- */
@@ -266,15 +304,25 @@ static void at91rm9200_reset(void)
266 at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
267}
268
307int rm9200_type;
308EXPORT_SYMBOL(rm9200_type);
309
310void __init at91rm9200_set_type(int type)
311{
312 rm9200_type = type;
313}
269
270/* --------------------------------------------------------------------
271 * AT91RM9200 processor initialization
272 * -------------------------------------------------------------------- */
273void __init at91rm9200_initialize(unsigned long main_clock, unsigned short banks)
318void __init at91rm9200_map_io(void)
274{
275 /* Map peripherals */
276 iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc));
322}
277
324void __init at91rm9200_initialize(unsigned long main_clock)
325{
278 at91_arch_reset = at91rm9200_reset;
279 at91_extern_irq = (1 << AT91RM9200_ID_IRQ0) | (1 << AT91RM9200_ID_IRQ1)
280 | (1 << AT91RM9200_ID_IRQ2) | (1 << AT91RM9200_ID_IRQ3)
@@ -288,7 +336,8 @@ void __init at91rm9200_initialize(unsigned long main_clock, unsigned short banks
288 at91rm9200_register_clocks();
289
290 /* Initialize GPIO subsystem */
291 at91_gpio_init(at91rm9200_gpio, banks);
339 at91_gpio_init(at91rm9200_gpio,
340 cpu_is_at91rm9200_bga() ? AT91RM9200_BGA : AT91RM9200_PQFP);
292}
293
294
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 7b539228e0ef..a0ba475be04c 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -644,15 +644,7 @@ static struct platform_device at91rm9200_tcb1_device = {
644
645static void __init at91_add_device_tc(void)
646{
647 /* this chip has a separate clock and irq for each TC channel */
648 at91_clock_associate("tc0_clk", &at91rm9200_tcb0_device.dev, "t0_clk");
649 at91_clock_associate("tc1_clk", &at91rm9200_tcb0_device.dev, "t1_clk");
650 at91_clock_associate("tc2_clk", &at91rm9200_tcb0_device.dev, "t2_clk");
651 platform_device_register(&at91rm9200_tcb0_device);
652
653 at91_clock_associate("tc3_clk", &at91rm9200_tcb1_device.dev, "t0_clk");
654 at91_clock_associate("tc4_clk", &at91rm9200_tcb1_device.dev, "t1_clk");
655 at91_clock_associate("tc5_clk", &at91rm9200_tcb1_device.dev, "t2_clk");
656 platform_device_register(&at91rm9200_tcb1_device);
657}
658#else
@@ -849,17 +841,14 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
849 case AT91RM9200_ID_SSC0:
850 pdev = &at91rm9200_ssc0_device;
851 configure_ssc0_pins(pins);
852 at91_clock_associate("ssc0_clk", &pdev->dev, "ssc");
853 break;
854 case AT91RM9200_ID_SSC1:
855 pdev = &at91rm9200_ssc1_device;
856 configure_ssc1_pins(pins);
857 at91_clock_associate("ssc1_clk", &pdev->dev, "ssc");
858 break;
859 case AT91RM9200_ID_SSC2:
860 pdev = &at91rm9200_ssc2_device;
861 configure_ssc2_pins(pins);
862 at91_clock_associate("ssc2_clk", &pdev->dev, "ssc");
863 break;
864 default:
865 return;
@@ -1109,37 +1098,34 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
1109void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1110{
1111 struct platform_device *pdev;
1101 struct atmel_uart_data *pdata;
1112
1113 switch (id) {
1114 case 0: /* DBGU */
1115 pdev = &at91rm9200_dbgu_device;
1116 configure_dbgu_pins();
1117 at91_clock_associate("mck", &pdev->dev, "usart");
1118 break;
1119 case AT91RM9200_ID_US0:
1120 pdev = &at91rm9200_uart0_device;
1121 configure_usart0_pins(pins);
1122 at91_clock_associate("usart0_clk", &pdev->dev, "usart");
1123 break;
1124 case AT91RM9200_ID_US1:
1125 pdev = &at91rm9200_uart1_device;
1126 configure_usart1_pins(pins);
1127 at91_clock_associate("usart1_clk", &pdev->dev, "usart");
1128 break;
1129 case AT91RM9200_ID_US2:
1130 pdev = &at91rm9200_uart2_device;
1131 configure_usart2_pins(pins);
1132 at91_clock_associate("usart2_clk", &pdev->dev, "usart");
1133 break;
1134 case AT91RM9200_ID_US3:
1135 pdev = &at91rm9200_uart3_device;
1136 configure_usart3_pins(pins);
1137 at91_clock_associate("usart3_clk", &pdev->dev, "usart");
1138 break;
1139 default:
1140 return;
1141 }
1142 pdev->id = portnr; /* update to mapped ID */
1127 pdata = pdev->dev.platform_data;
1128 pdata->num = portnr; /* update to mapped ID */
1143
1144 if (portnr < ATMEL_MAX_UART)
1145 at91_uarts[portnr] = pdev;
@@ -1147,8 +1133,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1147
1148void __init at91_set_serial_console(unsigned portnr)
1149{
1150 if (portnr < ATMEL_MAX_UART)
1136 if (portnr < ATMEL_MAX_UART) {
1151 atmel_default_console_device = at91_uarts[portnr];
1138 at91rm9200_set_console_clock(portnr);
1139 }
1152}
1153
1154void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index 195208b30024..7d606b04d313 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -231,6 +231,28 @@ static struct clk *periph_clocks[] __initdata = {
231 // irq0 .. irq2
232};
233
234static struct clk_lookup periph_clocks_lookups[] = {
235 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
236 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
237 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
238 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
239 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
240 CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
241 CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
242 CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
243 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
244};
245
246static struct clk_lookup usart_clocks_lookups[] = {
247 CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
248 CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
249 CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
250 CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
251 CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
252 CLKDEV_CON_DEV_ID("usart", "atmel_usart.5", &usart4_clk),
253 CLKDEV_CON_DEV_ID("usart", "atmel_usart.6", &usart5_clk),
254};
255
234/*
235 * The two programmable clocks.
236 * You must configure pin multiplexing to bring these signals out.
@@ -255,10 +277,27 @@ static void __init at91sam9260_register_clocks(void)
255 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
256 clk_register(periph_clocks[i]);
257
280 clkdev_add_table(periph_clocks_lookups,
281 ARRAY_SIZE(periph_clocks_lookups));
282 clkdev_add_table(usart_clocks_lookups,
283 ARRAY_SIZE(usart_clocks_lookups));
284
258 clk_register(&pck0); 285 clk_register(&pck0);
259 clk_register(&pck1); 286 clk_register(&pck1);
260} 287}
261 288
289static struct clk_lookup console_clock_lookup;
290
291void __init at91sam9260_set_console_clock(int id)
292{
293 if (id >= ARRAY_SIZE(usart_clocks_lookups))
294 return;
295
296 console_clock_lookup.con_id = "usart";
297 console_clock_lookup.clk = usart_clocks_lookups[id].clk;
298 clkdev_add(&console_clock_lookup);
299}
300
262/* -------------------------------------------------------------------- 301/* --------------------------------------------------------------------
263 * GPIO 302 * GPIO
264 * -------------------------------------------------------------------- */ 303 * -------------------------------------------------------------------- */
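Two things land in this hunk. First, the old at91_clock_associate() bindings become static clk_lookup tables registered in one call with clkdev_add_table(), matching on the consumer device name ("atmel_spi.0", "atmel_tcb.1", "atmel_usart.2", ...) plus a connection id. Second, at91sam9260_set_console_clock() adds one extra, con_id-only lookup so the console clock can be resolved even before the console's platform device exists. The sketch below shows the consumer side of such a table; it is illustrative, not the real atmel_serial code:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* With CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk) in
	 * the table, a driver probing against "atmel_usart.1" resolves its
	 * clock by connection id alone. */
	static int example_usart_get_clk(struct platform_device *pdev)
	{
		struct clk *clk = clk_get(&pdev->dev, "usart");

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		clk_enable(clk);
		return 0;
	}

The console lookup registered by *_set_console_clock() carries no dev_id, so an early clk_get(NULL, "usart"), presumably what the console setup path issues, also finds the right clock.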
@@ -289,7 +328,7 @@ static void at91sam9260_poweroff(void)
289 * AT91SAM9260 processor initialization 328 * AT91SAM9260 processor initialization
290 * -------------------------------------------------------------------- */ 329 * -------------------------------------------------------------------- */
291 330
292static void __init at91sam9xe_initialize(void) 331static void __init at91sam9xe_map_io(void)
293{ 332{
294 unsigned long cidr, sram_size; 333 unsigned long cidr, sram_size;
295 334
@@ -310,18 +349,21 @@ static void __init at91sam9xe_initialize(void)
310 iotable_init(at91sam9xe_sram_desc, ARRAY_SIZE(at91sam9xe_sram_desc)); 349 iotable_init(at91sam9xe_sram_desc, ARRAY_SIZE(at91sam9xe_sram_desc));
311} 350}
312 351
313void __init at91sam9260_initialize(unsigned long main_clock) 352void __init at91sam9260_map_io(void)
314{ 353{
315 /* Map peripherals */ 354 /* Map peripherals */
316 iotable_init(at91sam9260_io_desc, ARRAY_SIZE(at91sam9260_io_desc)); 355 iotable_init(at91sam9260_io_desc, ARRAY_SIZE(at91sam9260_io_desc));
317 356
318 if (cpu_is_at91sam9xe()) 357 if (cpu_is_at91sam9xe())
319 at91sam9xe_initialize(); 358 at91sam9xe_map_io();
320 else if (cpu_is_at91sam9g20()) 359 else if (cpu_is_at91sam9g20())
321 iotable_init(at91sam9g20_sram_desc, ARRAY_SIZE(at91sam9g20_sram_desc)); 360 iotable_init(at91sam9g20_sram_desc, ARRAY_SIZE(at91sam9g20_sram_desc));
322 else 361 else
323 iotable_init(at91sam9260_sram_desc, ARRAY_SIZE(at91sam9260_sram_desc)); 362 iotable_init(at91sam9260_sram_desc, ARRAY_SIZE(at91sam9260_sram_desc));
363}
324 364
365void __init at91sam9260_initialize(unsigned long main_clock)
366{
325 at91_arch_reset = at91sam9_alt_reset; 367 at91_arch_reset = at91sam9_alt_reset;
326 pm_power_off = at91sam9260_poweroff; 368 pm_power_off = at91sam9260_poweroff;
327 at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1) 369 at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1)
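The second theme of this hunk is the split of the SoC entry points: at91sam9260_map_io() now does nothing but iotable_init() of the static mappings, while at91sam9260_initialize() keeps the reset hook, power-off handler and external-IRQ mask and runs later, once clocks can be set up. A sketch of the resulting prototypes as a board would see them (header location assumed to be mach-at91/generic.h):

	/* Static I/O mappings only; safe to use as MACHINE_START .map_io. */
	extern void __init at91sam9260_map_io(void);

	/* Clocks, reset and power-off hooks, external IRQs; called from the
	 * board's early init with the main crystal rate in Hz. */
	extern void __init at91sam9260_initialize(unsigned long main_clock);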
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 07eb7b07e442..1fdeb9058a76 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -609,7 +609,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
609 at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */ 609 at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
610 at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI1_SPCK */ 610 at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI1_SPCK */
611 611
612 at91_clock_associate("spi0_clk", &at91sam9260_spi0_device.dev, "spi_clk");
613 platform_device_register(&at91sam9260_spi0_device); 612 platform_device_register(&at91sam9260_spi0_device);
614 } 613 }
615 if (enable_spi1) { 614 if (enable_spi1) {
@@ -617,7 +616,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
617 at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI1_MOSI */ 616 at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI1_MOSI */
618 at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI1_SPCK */ 617 at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI1_SPCK */
619 618
620 at91_clock_associate("spi1_clk", &at91sam9260_spi1_device.dev, "spi_clk");
621 platform_device_register(&at91sam9260_spi1_device); 619 platform_device_register(&at91sam9260_spi1_device);
622 } 620 }
623} 621}
@@ -694,15 +692,7 @@ static struct platform_device at91sam9260_tcb1_device = {
694 692
695static void __init at91_add_device_tc(void) 693static void __init at91_add_device_tc(void)
696{ 694{
697 /* this chip has a separate clock and irq for each TC channel */
698 at91_clock_associate("tc0_clk", &at91sam9260_tcb0_device.dev, "t0_clk");
699 at91_clock_associate("tc1_clk", &at91sam9260_tcb0_device.dev, "t1_clk");
700 at91_clock_associate("tc2_clk", &at91sam9260_tcb0_device.dev, "t2_clk");
701 platform_device_register(&at91sam9260_tcb0_device); 695 platform_device_register(&at91sam9260_tcb0_device);
702
703 at91_clock_associate("tc3_clk", &at91sam9260_tcb1_device.dev, "t0_clk");
704 at91_clock_associate("tc4_clk", &at91sam9260_tcb1_device.dev, "t1_clk");
705 at91_clock_associate("tc5_clk", &at91sam9260_tcb1_device.dev, "t2_clk");
706 platform_device_register(&at91sam9260_tcb1_device); 696 platform_device_register(&at91sam9260_tcb1_device);
707} 697}
708#else 698#else
@@ -820,7 +810,6 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
820 case AT91SAM9260_ID_SSC: 810 case AT91SAM9260_ID_SSC:
821 pdev = &at91sam9260_ssc_device; 811 pdev = &at91sam9260_ssc_device;
822 configure_ssc_pins(pins); 812 configure_ssc_pins(pins);
823 at91_clock_associate("ssc_clk", &pdev->dev, "pclk");
824 break; 813 break;
825 default: 814 default:
826 return; 815 return;
@@ -1139,47 +1128,42 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
1139void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) 1128void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1140{ 1129{
1141 struct platform_device *pdev; 1130 struct platform_device *pdev;
1131 struct atmel_uart_data *pdata;
1142 1132
1143 switch (id) { 1133 switch (id) {
1144 case 0: /* DBGU */ 1134 case 0: /* DBGU */
1145 pdev = &at91sam9260_dbgu_device; 1135 pdev = &at91sam9260_dbgu_device;
1146 configure_dbgu_pins(); 1136 configure_dbgu_pins();
1147 at91_clock_associate("mck", &pdev->dev, "usart");
1148 break; 1137 break;
1149 case AT91SAM9260_ID_US0: 1138 case AT91SAM9260_ID_US0:
1150 pdev = &at91sam9260_uart0_device; 1139 pdev = &at91sam9260_uart0_device;
1151 configure_usart0_pins(pins); 1140 configure_usart0_pins(pins);
1152 at91_clock_associate("usart0_clk", &pdev->dev, "usart");
1153 break; 1141 break;
1154 case AT91SAM9260_ID_US1: 1142 case AT91SAM9260_ID_US1:
1155 pdev = &at91sam9260_uart1_device; 1143 pdev = &at91sam9260_uart1_device;
1156 configure_usart1_pins(pins); 1144 configure_usart1_pins(pins);
1157 at91_clock_associate("usart1_clk", &pdev->dev, "usart");
1158 break; 1145 break;
1159 case AT91SAM9260_ID_US2: 1146 case AT91SAM9260_ID_US2:
1160 pdev = &at91sam9260_uart2_device; 1147 pdev = &at91sam9260_uart2_device;
1161 configure_usart2_pins(pins); 1148 configure_usart2_pins(pins);
1162 at91_clock_associate("usart2_clk", &pdev->dev, "usart");
1163 break; 1149 break;
1164 case AT91SAM9260_ID_US3: 1150 case AT91SAM9260_ID_US3:
1165 pdev = &at91sam9260_uart3_device; 1151 pdev = &at91sam9260_uart3_device;
1166 configure_usart3_pins(pins); 1152 configure_usart3_pins(pins);
1167 at91_clock_associate("usart3_clk", &pdev->dev, "usart");
1168 break; 1153 break;
1169 case AT91SAM9260_ID_US4: 1154 case AT91SAM9260_ID_US4:
1170 pdev = &at91sam9260_uart4_device; 1155 pdev = &at91sam9260_uart4_device;
1171 configure_usart4_pins(); 1156 configure_usart4_pins();
1172 at91_clock_associate("usart4_clk", &pdev->dev, "usart");
1173 break; 1157 break;
1174 case AT91SAM9260_ID_US5: 1158 case AT91SAM9260_ID_US5:
1175 pdev = &at91sam9260_uart5_device; 1159 pdev = &at91sam9260_uart5_device;
1176 configure_usart5_pins(); 1160 configure_usart5_pins();
1177 at91_clock_associate("usart5_clk", &pdev->dev, "usart");
1178 break; 1161 break;
1179 default: 1162 default:
1180 return; 1163 return;
1181 } 1164 }
1182 pdev->id = portnr; /* update to mapped ID */ 1165 pdata = pdev->dev.platform_data;
1166 pdata->num = portnr; /* update to mapped ID */
1183 1167
1184 if (portnr < ATMEL_MAX_UART) 1168 if (portnr < ATMEL_MAX_UART)
1185 at91_uarts[portnr] = pdev; 1169 at91_uarts[portnr] = pdev;
@@ -1187,8 +1171,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1187 1171
1188void __init at91_set_serial_console(unsigned portnr) 1172void __init at91_set_serial_console(unsigned portnr)
1189{ 1173{
1190 if (portnr < ATMEL_MAX_UART) 1174 if (portnr < ATMEL_MAX_UART) {
1191 atmel_default_console_device = at91_uarts[portnr]; 1175 atmel_default_console_device = at91_uarts[portnr];
1176 at91sam9260_set_console_clock(portnr);
1177 }
1192} 1178}
1193 1179
1194void __init at91_add_device_serial(void) 1180void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index fcad88668504..c1483168c97a 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -178,6 +178,24 @@ static struct clk *periph_clocks[] __initdata = {
178 // irq0 .. irq2 178 // irq0 .. irq2
179}; 179};
180 180
181static struct clk_lookup periph_clocks_lookups[] = {
182 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
183 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
184 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
185 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
 186	CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
187 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
188 CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
189 CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
190};
191
192static struct clk_lookup usart_clocks_lookups[] = {
193 CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
194 CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
195 CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
196 CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
197};
198
181/* 199/*
182 * The four programmable clocks. 200 * The four programmable clocks.
183 * You must configure pin multiplexing to bring these signals out. 201 * You must configure pin multiplexing to bring these signals out.
@@ -228,6 +246,11 @@ static void __init at91sam9261_register_clocks(void)
228 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++) 246 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
229 clk_register(periph_clocks[i]); 247 clk_register(periph_clocks[i]);
230 248
249 clkdev_add_table(periph_clocks_lookups,
250 ARRAY_SIZE(periph_clocks_lookups));
251 clkdev_add_table(usart_clocks_lookups,
252 ARRAY_SIZE(usart_clocks_lookups));
253
231 clk_register(&pck0); 254 clk_register(&pck0);
232 clk_register(&pck1); 255 clk_register(&pck1);
233 clk_register(&pck2); 256 clk_register(&pck2);
@@ -237,6 +260,18 @@ static void __init at91sam9261_register_clocks(void)
237 clk_register(&hck1); 260 clk_register(&hck1);
238} 261}
239 262
263static struct clk_lookup console_clock_lookup;
264
265void __init at91sam9261_set_console_clock(int id)
266{
267 if (id >= ARRAY_SIZE(usart_clocks_lookups))
268 return;
269
270 console_clock_lookup.con_id = "usart";
271 console_clock_lookup.clk = usart_clocks_lookups[id].clk;
272 clkdev_add(&console_clock_lookup);
273}
274
240/* -------------------------------------------------------------------- 275/* --------------------------------------------------------------------
241 * GPIO 276 * GPIO
242 * -------------------------------------------------------------------- */ 277 * -------------------------------------------------------------------- */
@@ -267,7 +302,7 @@ static void at91sam9261_poweroff(void)
267 * AT91SAM9261 processor initialization 302 * AT91SAM9261 processor initialization
268 * -------------------------------------------------------------------- */ 303 * -------------------------------------------------------------------- */
269 304
270void __init at91sam9261_initialize(unsigned long main_clock) 305void __init at91sam9261_map_io(void)
271{ 306{
272 /* Map peripherals */ 307 /* Map peripherals */
273 iotable_init(at91sam9261_io_desc, ARRAY_SIZE(at91sam9261_io_desc)); 308 iotable_init(at91sam9261_io_desc, ARRAY_SIZE(at91sam9261_io_desc));
@@ -276,8 +311,10 @@ void __init at91sam9261_initialize(unsigned long main_clock)
276 iotable_init(at91sam9g10_sram_desc, ARRAY_SIZE(at91sam9g10_sram_desc)); 311 iotable_init(at91sam9g10_sram_desc, ARRAY_SIZE(at91sam9g10_sram_desc));
277 else 312 else
278 iotable_init(at91sam9261_sram_desc, ARRAY_SIZE(at91sam9261_sram_desc)); 313 iotable_init(at91sam9261_sram_desc, ARRAY_SIZE(at91sam9261_sram_desc));
314}
279 315
280 316void __init at91sam9261_initialize(unsigned long main_clock)
317{
281 at91_arch_reset = at91sam9_alt_reset; 318 at91_arch_reset = at91sam9_alt_reset;
282 pm_power_off = at91sam9261_poweroff; 319 pm_power_off = at91sam9261_poweroff;
283 at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1) 320 at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 59fc48311fb0..3eb4538fceeb 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -426,7 +426,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
426 at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */ 426 at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
427 at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */ 427 at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
428 428
429 at91_clock_associate("spi0_clk", &at91sam9261_spi0_device.dev, "spi_clk");
430 platform_device_register(&at91sam9261_spi0_device); 429 platform_device_register(&at91sam9261_spi0_device);
431 } 430 }
432 if (enable_spi1) { 431 if (enable_spi1) {
@@ -434,7 +433,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
434 at91_set_A_periph(AT91_PIN_PB31, 0); /* SPI1_MOSI */ 433 at91_set_A_periph(AT91_PIN_PB31, 0); /* SPI1_MOSI */
435 at91_set_A_periph(AT91_PIN_PB29, 0); /* SPI1_SPCK */ 434 at91_set_A_periph(AT91_PIN_PB29, 0); /* SPI1_SPCK */
436 435
437 at91_clock_associate("spi1_clk", &at91sam9261_spi1_device.dev, "spi_clk");
438 platform_device_register(&at91sam9261_spi1_device); 436 platform_device_register(&at91sam9261_spi1_device);
439 } 437 }
440} 438}
@@ -581,10 +579,6 @@ static struct platform_device at91sam9261_tcb_device = {
581 579
582static void __init at91_add_device_tc(void) 580static void __init at91_add_device_tc(void)
583{ 581{
584 /* this chip has a separate clock and irq for each TC channel */
585 at91_clock_associate("tc0_clk", &at91sam9261_tcb_device.dev, "t0_clk");
586 at91_clock_associate("tc1_clk", &at91sam9261_tcb_device.dev, "t1_clk");
587 at91_clock_associate("tc2_clk", &at91sam9261_tcb_device.dev, "t2_clk");
588 platform_device_register(&at91sam9261_tcb_device); 582 platform_device_register(&at91sam9261_tcb_device);
589} 583}
590#else 584#else
@@ -786,17 +780,14 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
786 case AT91SAM9261_ID_SSC0: 780 case AT91SAM9261_ID_SSC0:
787 pdev = &at91sam9261_ssc0_device; 781 pdev = &at91sam9261_ssc0_device;
788 configure_ssc0_pins(pins); 782 configure_ssc0_pins(pins);
789 at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
790 break; 783 break;
791 case AT91SAM9261_ID_SSC1: 784 case AT91SAM9261_ID_SSC1:
792 pdev = &at91sam9261_ssc1_device; 785 pdev = &at91sam9261_ssc1_device;
793 configure_ssc1_pins(pins); 786 configure_ssc1_pins(pins);
794 at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
795 break; 787 break;
796 case AT91SAM9261_ID_SSC2: 788 case AT91SAM9261_ID_SSC2:
797 pdev = &at91sam9261_ssc2_device; 789 pdev = &at91sam9261_ssc2_device;
798 configure_ssc2_pins(pins); 790 configure_ssc2_pins(pins);
799 at91_clock_associate("ssc2_clk", &pdev->dev, "pclk");
800 break; 791 break;
801 default: 792 default:
802 return; 793 return;
@@ -989,32 +980,30 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
989void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) 980void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
990{ 981{
991 struct platform_device *pdev; 982 struct platform_device *pdev;
983 struct atmel_uart_data *pdata;
992 984
993 switch (id) { 985 switch (id) {
994 case 0: /* DBGU */ 986 case 0: /* DBGU */
995 pdev = &at91sam9261_dbgu_device; 987 pdev = &at91sam9261_dbgu_device;
996 configure_dbgu_pins(); 988 configure_dbgu_pins();
997 at91_clock_associate("mck", &pdev->dev, "usart");
998 break; 989 break;
999 case AT91SAM9261_ID_US0: 990 case AT91SAM9261_ID_US0:
1000 pdev = &at91sam9261_uart0_device; 991 pdev = &at91sam9261_uart0_device;
1001 configure_usart0_pins(pins); 992 configure_usart0_pins(pins);
1002 at91_clock_associate("usart0_clk", &pdev->dev, "usart");
1003 break; 993 break;
1004 case AT91SAM9261_ID_US1: 994 case AT91SAM9261_ID_US1:
1005 pdev = &at91sam9261_uart1_device; 995 pdev = &at91sam9261_uart1_device;
1006 configure_usart1_pins(pins); 996 configure_usart1_pins(pins);
1007 at91_clock_associate("usart1_clk", &pdev->dev, "usart");
1008 break; 997 break;
1009 case AT91SAM9261_ID_US2: 998 case AT91SAM9261_ID_US2:
1010 pdev = &at91sam9261_uart2_device; 999 pdev = &at91sam9261_uart2_device;
1011 configure_usart2_pins(pins); 1000 configure_usart2_pins(pins);
1012 at91_clock_associate("usart2_clk", &pdev->dev, "usart");
1013 break; 1001 break;
1014 default: 1002 default:
1015 return; 1003 return;
1016 } 1004 }
1017 pdev->id = portnr; /* update to mapped ID */ 1005 pdata = pdev->dev.platform_data;
1006 pdata->num = portnr; /* update to mapped ID */
1018 1007
1019 if (portnr < ATMEL_MAX_UART) 1008 if (portnr < ATMEL_MAX_UART)
1020 at91_uarts[portnr] = pdev; 1009 at91_uarts[portnr] = pdev;
@@ -1022,8 +1011,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1022 1011
1023void __init at91_set_serial_console(unsigned portnr) 1012void __init at91_set_serial_console(unsigned portnr)
1024{ 1013{
1025 if (portnr < ATMEL_MAX_UART) 1014 if (portnr < ATMEL_MAX_UART) {
1026 atmel_default_console_device = at91_uarts[portnr]; 1015 atmel_default_console_device = at91_uarts[portnr];
1016 at91sam9261_set_console_clock(portnr);
1017 }
1027} 1018}
1028 1019
1029void __init at91_add_device_serial(void) 1020void __init at91_add_device_serial(void)
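On the *_devices.c side the per-device at91_clock_associate() calls can simply go away because clkdev keys on the platform device name plus instance id. Once the timer/counter block is registered as "atmel_tcb.0", its three channel clocks are reachable purely by connection id. A hedged sketch of how a driver could collect them (names are illustrative, not the real atmel_tcb code):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_tcb_get_clocks(struct platform_device *pdev,
					  struct clk *tclk[3])
	{
		static const char *const ids[] = { "t0_clk", "t1_clk", "t2_clk" };
		int i;

		for (i = 0; i < 3; i++) {
			/* matched against CLKDEV_CON_DEV_ID(ids[i], "atmel_tcb.0", ...) */
			tclk[i] = clk_get(&pdev->dev, ids[i]);
			if (IS_ERR(tclk[i]))
				return PTR_ERR(tclk[i]);
		}
		return 0;
	}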
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index 249f900954d8..dc28477d14ff 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -199,6 +199,23 @@ static struct clk *periph_clocks[] __initdata = {
199 // irq0 .. irq1 199 // irq0 .. irq1
200}; 200};
201 201
202static struct clk_lookup periph_clocks_lookups[] = {
203 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
204 CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
205 CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
206 CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
207 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
208 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
209 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
210};
211
212static struct clk_lookup usart_clocks_lookups[] = {
213 CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
214 CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
215 CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
216 CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
217};
218
202/* 219/*
203 * The four programmable clocks. 220 * The four programmable clocks.
204 * You must configure pin multiplexing to bring these signals out. 221 * You must configure pin multiplexing to bring these signals out.
@@ -235,12 +252,29 @@ static void __init at91sam9263_register_clocks(void)
235 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++) 252 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
236 clk_register(periph_clocks[i]); 253 clk_register(periph_clocks[i]);
237 254
255 clkdev_add_table(periph_clocks_lookups,
256 ARRAY_SIZE(periph_clocks_lookups));
257 clkdev_add_table(usart_clocks_lookups,
258 ARRAY_SIZE(usart_clocks_lookups));
259
238 clk_register(&pck0); 260 clk_register(&pck0);
239 clk_register(&pck1); 261 clk_register(&pck1);
240 clk_register(&pck2); 262 clk_register(&pck2);
241 clk_register(&pck3); 263 clk_register(&pck3);
242} 264}
243 265
266static struct clk_lookup console_clock_lookup;
267
268void __init at91sam9263_set_console_clock(int id)
269{
270 if (id >= ARRAY_SIZE(usart_clocks_lookups))
271 return;
272
273 console_clock_lookup.con_id = "usart";
274 console_clock_lookup.clk = usart_clocks_lookups[id].clk;
275 clkdev_add(&console_clock_lookup);
276}
277
244/* -------------------------------------------------------------------- 278/* --------------------------------------------------------------------
245 * GPIO 279 * GPIO
246 * -------------------------------------------------------------------- */ 280 * -------------------------------------------------------------------- */
@@ -279,11 +313,14 @@ static void at91sam9263_poweroff(void)
279 * AT91SAM9263 processor initialization 313 * AT91SAM9263 processor initialization
280 * -------------------------------------------------------------------- */ 314 * -------------------------------------------------------------------- */
281 315
282void __init at91sam9263_initialize(unsigned long main_clock) 316void __init at91sam9263_map_io(void)
283{ 317{
284 /* Map peripherals */ 318 /* Map peripherals */
285 iotable_init(at91sam9263_io_desc, ARRAY_SIZE(at91sam9263_io_desc)); 319 iotable_init(at91sam9263_io_desc, ARRAY_SIZE(at91sam9263_io_desc));
320}
286 321
322void __init at91sam9263_initialize(unsigned long main_clock)
323{
287 at91_arch_reset = at91sam9_alt_reset; 324 at91_arch_reset = at91sam9_alt_reset;
288 pm_power_off = at91sam9263_poweroff; 325 pm_power_off = at91sam9263_poweroff;
289 at91_extern_irq = (1 << AT91SAM9263_ID_IRQ0) | (1 << AT91SAM9263_ID_IRQ1); 326 at91_extern_irq = (1 << AT91SAM9263_ID_IRQ0) | (1 << AT91SAM9263_ID_IRQ1);
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index fb5c23af1017..ffe081b77ed0 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -308,7 +308,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
308 } 308 }
309 309
310 mmc0_data = *data; 310 mmc0_data = *data;
311 at91_clock_associate("mci0_clk", &at91sam9263_mmc0_device.dev, "mci_clk");
312 platform_device_register(&at91sam9263_mmc0_device); 311 platform_device_register(&at91sam9263_mmc0_device);
313 } else { /* MCI1 */ 312 } else { /* MCI1 */
314 /* CLK */ 313 /* CLK */
@@ -339,7 +338,6 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
339 } 338 }
340 339
341 mmc1_data = *data; 340 mmc1_data = *data;
342 at91_clock_associate("mci1_clk", &at91sam9263_mmc1_device.dev, "mci_clk");
343 platform_device_register(&at91sam9263_mmc1_device); 341 platform_device_register(&at91sam9263_mmc1_device);
344 } 342 }
345} 343}
@@ -686,7 +684,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
686 at91_set_B_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */ 684 at91_set_B_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */
687 at91_set_B_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */ 685 at91_set_B_periph(AT91_PIN_PA2, 0); /* SPI0_SPCK */
688 686
689 at91_clock_associate("spi0_clk", &at91sam9263_spi0_device.dev, "spi_clk");
690 platform_device_register(&at91sam9263_spi0_device); 687 platform_device_register(&at91sam9263_spi0_device);
691 } 688 }
692 if (enable_spi1) { 689 if (enable_spi1) {
@@ -694,7 +691,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
694 at91_set_A_periph(AT91_PIN_PB13, 0); /* SPI1_MOSI */ 691 at91_set_A_periph(AT91_PIN_PB13, 0); /* SPI1_MOSI */
695 at91_set_A_periph(AT91_PIN_PB14, 0); /* SPI1_SPCK */ 692 at91_set_A_periph(AT91_PIN_PB14, 0); /* SPI1_SPCK */
696 693
697 at91_clock_associate("spi1_clk", &at91sam9263_spi1_device.dev, "spi_clk");
698 platform_device_register(&at91sam9263_spi1_device); 694 platform_device_register(&at91sam9263_spi1_device);
699 } 695 }
700} 696}
@@ -941,8 +937,6 @@ static struct platform_device at91sam9263_tcb_device = {
941 937
942static void __init at91_add_device_tc(void) 938static void __init at91_add_device_tc(void)
943{ 939{
944 /* this chip has one clock and irq for all three TC channels */
945 at91_clock_associate("tcb_clk", &at91sam9263_tcb_device.dev, "t0_clk");
946 platform_device_register(&at91sam9263_tcb_device); 940 platform_device_register(&at91sam9263_tcb_device);
947} 941}
948#else 942#else
@@ -1171,12 +1165,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
1171 case AT91SAM9263_ID_SSC0: 1165 case AT91SAM9263_ID_SSC0:
1172 pdev = &at91sam9263_ssc0_device; 1166 pdev = &at91sam9263_ssc0_device;
1173 configure_ssc0_pins(pins); 1167 configure_ssc0_pins(pins);
1174 at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
1175 break; 1168 break;
1176 case AT91SAM9263_ID_SSC1: 1169 case AT91SAM9263_ID_SSC1:
1177 pdev = &at91sam9263_ssc1_device; 1170 pdev = &at91sam9263_ssc1_device;
1178 configure_ssc1_pins(pins); 1171 configure_ssc1_pins(pins);
1179 at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
1180 break; 1172 break;
1181 default: 1173 default:
1182 return; 1174 return;
@@ -1370,32 +1362,30 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
1370void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) 1362void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1371{ 1363{
1372 struct platform_device *pdev; 1364 struct platform_device *pdev;
1365 struct atmel_uart_data *pdata;
1373 1366
1374 switch (id) { 1367 switch (id) {
1375 case 0: /* DBGU */ 1368 case 0: /* DBGU */
1376 pdev = &at91sam9263_dbgu_device; 1369 pdev = &at91sam9263_dbgu_device;
1377 configure_dbgu_pins(); 1370 configure_dbgu_pins();
1378 at91_clock_associate("mck", &pdev->dev, "usart");
1379 break; 1371 break;
1380 case AT91SAM9263_ID_US0: 1372 case AT91SAM9263_ID_US0:
1381 pdev = &at91sam9263_uart0_device; 1373 pdev = &at91sam9263_uart0_device;
1382 configure_usart0_pins(pins); 1374 configure_usart0_pins(pins);
1383 at91_clock_associate("usart0_clk", &pdev->dev, "usart");
1384 break; 1375 break;
1385 case AT91SAM9263_ID_US1: 1376 case AT91SAM9263_ID_US1:
1386 pdev = &at91sam9263_uart1_device; 1377 pdev = &at91sam9263_uart1_device;
1387 configure_usart1_pins(pins); 1378 configure_usart1_pins(pins);
1388 at91_clock_associate("usart1_clk", &pdev->dev, "usart");
1389 break; 1379 break;
1390 case AT91SAM9263_ID_US2: 1380 case AT91SAM9263_ID_US2:
1391 pdev = &at91sam9263_uart2_device; 1381 pdev = &at91sam9263_uart2_device;
1392 configure_usart2_pins(pins); 1382 configure_usart2_pins(pins);
1393 at91_clock_associate("usart2_clk", &pdev->dev, "usart");
1394 break; 1383 break;
1395 default: 1384 default:
1396 return; 1385 return;
1397 } 1386 }
1398 pdev->id = portnr; /* update to mapped ID */ 1387 pdata = pdev->dev.platform_data;
1388 pdata->num = portnr; /* update to mapped ID */
1399 1389
1400 if (portnr < ATMEL_MAX_UART) 1390 if (portnr < ATMEL_MAX_UART)
1401 at91_uarts[portnr] = pdev; 1391 at91_uarts[portnr] = pdev;
@@ -1403,8 +1393,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1403 1393
1404void __init at91_set_serial_console(unsigned portnr) 1394void __init at91_set_serial_console(unsigned portnr)
1405{ 1395{
1406 if (portnr < ATMEL_MAX_UART) 1396 if (portnr < ATMEL_MAX_UART) {
1407 atmel_default_console_device = at91_uarts[portnr]; 1397 atmel_default_console_device = at91_uarts[portnr];
1398 at91sam9263_set_console_clock(portnr);
1399 }
1408} 1400}
1409 1401
1410void __init at91_add_device_serial(void) 1402void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index c67b47f1c0fd..2bb6ff9af1c7 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -184,22 +184,6 @@ static struct clk vdec_clk = {
184 .type = CLK_TYPE_PERIPHERAL, 184 .type = CLK_TYPE_PERIPHERAL,
185}; 185};
186 186
187/* One additional fake clock for ohci */
188static struct clk ohci_clk = {
189 .name = "ohci_clk",
190 .pmc_mask = 0,
191 .type = CLK_TYPE_PERIPHERAL,
192 .parent = &uhphs_clk,
193};
194
195/* One additional fake clock for second TC block */
196static struct clk tcb1_clk = {
197 .name = "tcb1_clk",
198 .pmc_mask = 0,
199 .type = CLK_TYPE_PERIPHERAL,
200 .parent = &tcb0_clk,
201};
202
203static struct clk *periph_clocks[] __initdata = { 187static struct clk *periph_clocks[] __initdata = {
204 &pioA_clk, 188 &pioA_clk,
205 &pioB_clk, 189 &pioB_clk,
@@ -228,8 +212,30 @@ static struct clk *periph_clocks[] __initdata = {
228 &udphs_clk, 212 &udphs_clk,
229 &mmc1_clk, 213 &mmc1_clk,
230 // irq0 214 // irq0
231 &ohci_clk, 215};
232 &tcb1_clk, 216
217static struct clk_lookup periph_clocks_lookups[] = {
218 /* One additional fake clock for ohci */
219 CLKDEV_CON_ID("ohci_clk", &uhphs_clk),
220 CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci.0", &uhphs_clk),
221 CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
222 CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
223 CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
224 CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
225 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
226 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
227 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk),
228 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tcb0_clk),
229 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
230 CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
231};
232
233static struct clk_lookup usart_clocks_lookups[] = {
234 CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
235 CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
236 CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
237 CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
238 CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
233}; 239};
234 240
235/* 241/*
@@ -256,6 +262,11 @@ static void __init at91sam9g45_register_clocks(void)
256 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++) 262 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
257 clk_register(periph_clocks[i]); 263 clk_register(periph_clocks[i]);
258 264
265 clkdev_add_table(periph_clocks_lookups,
266 ARRAY_SIZE(periph_clocks_lookups));
267 clkdev_add_table(usart_clocks_lookups,
268 ARRAY_SIZE(usart_clocks_lookups));
269
259 if (cpu_is_at91sam9m10() || cpu_is_at91sam9m11()) 270 if (cpu_is_at91sam9m10() || cpu_is_at91sam9m11())
260 clk_register(&vdec_clk); 271 clk_register(&vdec_clk);
261 272
@@ -263,6 +274,18 @@ static void __init at91sam9g45_register_clocks(void)
263 clk_register(&pck1); 274 clk_register(&pck1);
264} 275}
265 276
277static struct clk_lookup console_clock_lookup;
278
279void __init at91sam9g45_set_console_clock(int id)
280{
281 if (id >= ARRAY_SIZE(usart_clocks_lookups))
282 return;
283
284 console_clock_lookup.con_id = "usart";
285 console_clock_lookup.clk = usart_clocks_lookups[id].clk;
286 clkdev_add(&console_clock_lookup);
287}
288
266/* -------------------------------------------------------------------- 289/* --------------------------------------------------------------------
267 * GPIO 290 * GPIO
268 * -------------------------------------------------------------------- */ 291 * -------------------------------------------------------------------- */
@@ -306,11 +329,14 @@ static void at91sam9g45_poweroff(void)
306 * AT91SAM9G45 processor initialization 329 * AT91SAM9G45 processor initialization
307 * -------------------------------------------------------------------- */ 330 * -------------------------------------------------------------------- */
308 331
309void __init at91sam9g45_initialize(unsigned long main_clock) 332void __init at91sam9g45_map_io(void)
310{ 333{
311 /* Map peripherals */ 334 /* Map peripherals */
312 iotable_init(at91sam9g45_io_desc, ARRAY_SIZE(at91sam9g45_io_desc)); 335 iotable_init(at91sam9g45_io_desc, ARRAY_SIZE(at91sam9g45_io_desc));
336}
313 337
338void __init at91sam9g45_initialize(unsigned long main_clock)
339{
314 at91_arch_reset = at91sam9g45_reset; 340 at91_arch_reset = at91sam9g45_reset;
315 pm_power_off = at91sam9g45_poweroff; 341 pm_power_off = at91sam9g45_poweroff;
316 at91_extern_irq = (1 << AT91SAM9G45_ID_IRQ0); 342 at91_extern_irq = (1 << AT91SAM9G45_ID_IRQ0);
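at91sam9g45 additionally drops its two fake clocks: ohci_clk and tcb1_clk existed only to give at91_clock_associate() something to bind, and both become plain aliases in the lookup table. A sketch of just those two entries, spelling out the difference between the macros (the array name here is hypothetical; in the hunk above they sit in periph_clocks_lookups):

	#include <linux/clkdev.h>

	static struct clk_lookup g45_alias_lookups[] = {
		/* no dev_id: matches clk_get(NULL, "ohci_clk") by name alone,
		 * which is presumably how the OHCI glue requests its clock */
		CLKDEV_CON_ID("ohci_clk", &uhphs_clk),
		/* the second TC block shares the single tcb0 peripheral clock */
		CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tcb0_clk),
	};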
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 5e9f8a4c38df..05674865bc21 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -180,7 +180,6 @@ void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data)
180 } 180 }
181 181
182 usbh_ehci_data = *data; 182 usbh_ehci_data = *data;
183 at91_clock_associate("uhphs_clk", &at91_usbh_ehci_device.dev, "ehci_clk");
184 platform_device_register(&at91_usbh_ehci_device); 183 platform_device_register(&at91_usbh_ehci_device);
185} 184}
186#else 185#else
@@ -266,10 +265,6 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
266 265
267 /* Pullup pin is handled internally by USB device peripheral */ 266 /* Pullup pin is handled internally by USB device peripheral */
268 267
269 /* Clocks */
270 at91_clock_associate("utmi_clk", &at91_usba_udc_device.dev, "hclk");
271 at91_clock_associate("udphs_clk", &at91_usba_udc_device.dev, "pclk");
272
273 platform_device_register(&at91_usba_udc_device); 268 platform_device_register(&at91_usba_udc_device);
274} 269}
275#else 270#else
@@ -478,7 +473,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
478 } 473 }
479 474
480 mmc0_data = *data; 475 mmc0_data = *data;
481 at91_clock_associate("mci0_clk", &at91sam9g45_mmc0_device.dev, "mci_clk");
482 platform_device_register(&at91sam9g45_mmc0_device); 476 platform_device_register(&at91sam9g45_mmc0_device);
483 477
484 } else { /* MCI1 */ 478 } else { /* MCI1 */
@@ -504,7 +498,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
504 } 498 }
505 499
506 mmc1_data = *data; 500 mmc1_data = *data;
507 at91_clock_associate("mci1_clk", &at91sam9g45_mmc1_device.dev, "mci_clk");
508 platform_device_register(&at91sam9g45_mmc1_device); 501 platform_device_register(&at91sam9g45_mmc1_device);
509 502
510 } 503 }
@@ -801,7 +794,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
801 at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI0_MOSI */ 794 at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI0_MOSI */
802 at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI0_SPCK */ 795 at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI0_SPCK */
803 796
804 at91_clock_associate("spi0_clk", &at91sam9g45_spi0_device.dev, "spi_clk");
805 platform_device_register(&at91sam9g45_spi0_device); 797 platform_device_register(&at91sam9g45_spi0_device);
806 } 798 }
807 if (enable_spi1) { 799 if (enable_spi1) {
@@ -809,7 +801,6 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
809 at91_set_A_periph(AT91_PIN_PB15, 0); /* SPI1_MOSI */ 801 at91_set_A_periph(AT91_PIN_PB15, 0); /* SPI1_MOSI */
810 at91_set_A_periph(AT91_PIN_PB16, 0); /* SPI1_SPCK */ 802 at91_set_A_periph(AT91_PIN_PB16, 0); /* SPI1_SPCK */
811 803
812 at91_clock_associate("spi1_clk", &at91sam9g45_spi1_device.dev, "spi_clk");
813 platform_device_register(&at91sam9g45_spi1_device); 804 platform_device_register(&at91sam9g45_spi1_device);
814 } 805 }
815} 806}
@@ -999,10 +990,7 @@ static struct platform_device at91sam9g45_tcb1_device = {
999 990
1000static void __init at91_add_device_tc(void) 991static void __init at91_add_device_tc(void)
1001{ 992{
1002 /* this chip has one clock and irq for all six TC channels */
1003 at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
1004 platform_device_register(&at91sam9g45_tcb0_device); 993 platform_device_register(&at91sam9g45_tcb0_device);
1005 at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
1006 platform_device_register(&at91sam9g45_tcb1_device); 994 platform_device_register(&at91sam9g45_tcb1_device);
1007} 995}
1008#else 996#else
@@ -1286,12 +1274,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
1286 case AT91SAM9G45_ID_SSC0: 1274 case AT91SAM9G45_ID_SSC0:
1287 pdev = &at91sam9g45_ssc0_device; 1275 pdev = &at91sam9g45_ssc0_device;
1288 configure_ssc0_pins(pins); 1276 configure_ssc0_pins(pins);
1289 at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
1290 break; 1277 break;
1291 case AT91SAM9G45_ID_SSC1: 1278 case AT91SAM9G45_ID_SSC1:
1292 pdev = &at91sam9g45_ssc1_device; 1279 pdev = &at91sam9g45_ssc1_device;
1293 configure_ssc1_pins(pins); 1280 configure_ssc1_pins(pins);
1294 at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
1295 break; 1281 break;
1296 default: 1282 default:
1297 return; 1283 return;
@@ -1527,37 +1513,34 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
1527void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) 1513void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1528{ 1514{
1529 struct platform_device *pdev; 1515 struct platform_device *pdev;
1516 struct atmel_uart_data *pdata;
1530 1517
1531 switch (id) { 1518 switch (id) {
1532 case 0: /* DBGU */ 1519 case 0: /* DBGU */
1533 pdev = &at91sam9g45_dbgu_device; 1520 pdev = &at91sam9g45_dbgu_device;
1534 configure_dbgu_pins(); 1521 configure_dbgu_pins();
1535 at91_clock_associate("mck", &pdev->dev, "usart");
1536 break; 1522 break;
1537 case AT91SAM9G45_ID_US0: 1523 case AT91SAM9G45_ID_US0:
1538 pdev = &at91sam9g45_uart0_device; 1524 pdev = &at91sam9g45_uart0_device;
1539 configure_usart0_pins(pins); 1525 configure_usart0_pins(pins);
1540 at91_clock_associate("usart0_clk", &pdev->dev, "usart");
1541 break; 1526 break;
1542 case AT91SAM9G45_ID_US1: 1527 case AT91SAM9G45_ID_US1:
1543 pdev = &at91sam9g45_uart1_device; 1528 pdev = &at91sam9g45_uart1_device;
1544 configure_usart1_pins(pins); 1529 configure_usart1_pins(pins);
1545 at91_clock_associate("usart1_clk", &pdev->dev, "usart");
1546 break; 1530 break;
1547 case AT91SAM9G45_ID_US2: 1531 case AT91SAM9G45_ID_US2:
1548 pdev = &at91sam9g45_uart2_device; 1532 pdev = &at91sam9g45_uart2_device;
1549 configure_usart2_pins(pins); 1533 configure_usart2_pins(pins);
1550 at91_clock_associate("usart2_clk", &pdev->dev, "usart");
1551 break; 1534 break;
1552 case AT91SAM9G45_ID_US3: 1535 case AT91SAM9G45_ID_US3:
1553 pdev = &at91sam9g45_uart3_device; 1536 pdev = &at91sam9g45_uart3_device;
1554 configure_usart3_pins(pins); 1537 configure_usart3_pins(pins);
1555 at91_clock_associate("usart3_clk", &pdev->dev, "usart");
1556 break; 1538 break;
1557 default: 1539 default:
1558 return; 1540 return;
1559 } 1541 }
1560 pdev->id = portnr; /* update to mapped ID */ 1542 pdata = pdev->dev.platform_data;
1543 pdata->num = portnr; /* update to mapped ID */
1561 1544
1562 if (portnr < ATMEL_MAX_UART) 1545 if (portnr < ATMEL_MAX_UART)
1563 at91_uarts[portnr] = pdev; 1546 at91_uarts[portnr] = pdev;
@@ -1565,8 +1548,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1565 1548
1566void __init at91_set_serial_console(unsigned portnr) 1549void __init at91_set_serial_console(unsigned portnr)
1567{ 1550{
1568 if (portnr < ATMEL_MAX_UART) 1551 if (portnr < ATMEL_MAX_UART) {
1569 atmel_default_console_device = at91_uarts[portnr]; 1552 atmel_default_console_device = at91_uarts[portnr];
1553 at91sam9g45_set_console_clock(portnr);
1554 }
1570} 1555}
1571 1556
1572void __init at91_add_device_serial(void) 1557void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 6a9d24e5ed8e..1a40f16b66c8 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -190,6 +190,24 @@ static struct clk *periph_clocks[] __initdata = {
190 // irq0 190 // irq0
191}; 191};
192 192
193static struct clk_lookup periph_clocks_lookups[] = {
194 CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
195 CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
196 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
197 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
198 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
199 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
200 CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
201};
202
203static struct clk_lookup usart_clocks_lookups[] = {
204 CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
205 CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
206 CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
207 CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
208 CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
209};
210
193/* 211/*
194 * The two programmable clocks. 212 * The two programmable clocks.
195 * You must configure pin multiplexing to bring these signals out. 213 * You must configure pin multiplexing to bring these signals out.
@@ -214,10 +232,27 @@ static void __init at91sam9rl_register_clocks(void)
214 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++) 232 for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
215 clk_register(periph_clocks[i]); 233 clk_register(periph_clocks[i]);
216 234
235 clkdev_add_table(periph_clocks_lookups,
236 ARRAY_SIZE(periph_clocks_lookups));
237 clkdev_add_table(usart_clocks_lookups,
238 ARRAY_SIZE(usart_clocks_lookups));
239
217 clk_register(&pck0); 240 clk_register(&pck0);
218 clk_register(&pck1); 241 clk_register(&pck1);
219} 242}
220 243
244static struct clk_lookup console_clock_lookup;
245
246void __init at91sam9rl_set_console_clock(int id)
247{
248 if (id >= ARRAY_SIZE(usart_clocks_lookups))
249 return;
250
251 console_clock_lookup.con_id = "usart";
252 console_clock_lookup.clk = usart_clocks_lookups[id].clk;
253 clkdev_add(&console_clock_lookup);
254}
255
221/* -------------------------------------------------------------------- 256/* --------------------------------------------------------------------
222 * GPIO 257 * GPIO
223 * -------------------------------------------------------------------- */ 258 * -------------------------------------------------------------------- */
@@ -252,7 +287,7 @@ static void at91sam9rl_poweroff(void)
252 * AT91SAM9RL processor initialization 287 * AT91SAM9RL processor initialization
253 * -------------------------------------------------------------------- */ 288 * -------------------------------------------------------------------- */
254 289
255void __init at91sam9rl_initialize(unsigned long main_clock) 290void __init at91sam9rl_map_io(void)
256{ 291{
257 unsigned long cidr, sram_size; 292 unsigned long cidr, sram_size;
258 293
@@ -275,7 +310,10 @@ void __init at91sam9rl_initialize(unsigned long main_clock)
275 310
276 /* Map SRAM */ 311 /* Map SRAM */
277 iotable_init(at91sam9rl_sram_desc, ARRAY_SIZE(at91sam9rl_sram_desc)); 312 iotable_init(at91sam9rl_sram_desc, ARRAY_SIZE(at91sam9rl_sram_desc));
313}
278 314
315void __init at91sam9rl_initialize(unsigned long main_clock)
316{
279 at91_arch_reset = at91sam9_alt_reset; 317 at91_arch_reset = at91sam9_alt_reset;
280 pm_power_off = at91sam9rl_poweroff; 318 pm_power_off = at91sam9rl_poweroff;
281 at91_extern_irq = (1 << AT91SAM9RL_ID_IRQ0); 319 at91_extern_irq = (1 << AT91SAM9RL_ID_IRQ0);
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index c49262bddd85..c296045f2b6a 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -155,10 +155,6 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
155 155
156 /* Pullup pin is handled internally by USB device peripheral */ 156 /* Pullup pin is handled internally by USB device peripheral */
157 157
158 /* Clocks */
159 at91_clock_associate("utmi_clk", &at91_usba_udc_device.dev, "hclk");
160 at91_clock_associate("udphs_clk", &at91_usba_udc_device.dev, "pclk");
161
162 platform_device_register(&at91_usba_udc_device); 158 platform_device_register(&at91_usba_udc_device);
163} 159}
164#else 160#else
@@ -605,10 +601,6 @@ static struct platform_device at91sam9rl_tcb_device = {
605 601
606static void __init at91_add_device_tc(void) 602static void __init at91_add_device_tc(void)
607{ 603{
608 /* this chip has a separate clock and irq for each TC channel */
609 at91_clock_associate("tc0_clk", &at91sam9rl_tcb_device.dev, "t0_clk");
610 at91_clock_associate("tc1_clk", &at91sam9rl_tcb_device.dev, "t1_clk");
611 at91_clock_associate("tc2_clk", &at91sam9rl_tcb_device.dev, "t2_clk");
612 platform_device_register(&at91sam9rl_tcb_device); 604 platform_device_register(&at91sam9rl_tcb_device);
613} 605}
614#else 606#else
@@ -892,12 +884,10 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins)
892 case AT91SAM9RL_ID_SSC0: 884 case AT91SAM9RL_ID_SSC0:
893 pdev = &at91sam9rl_ssc0_device; 885 pdev = &at91sam9rl_ssc0_device;
894 configure_ssc0_pins(pins); 886 configure_ssc0_pins(pins);
895 at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
896 break; 887 break;
897 case AT91SAM9RL_ID_SSC1: 888 case AT91SAM9RL_ID_SSC1:
898 pdev = &at91sam9rl_ssc1_device; 889 pdev = &at91sam9rl_ssc1_device;
899 configure_ssc1_pins(pins); 890 configure_ssc1_pins(pins);
900 at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
901 break; 891 break;
902 default: 892 default:
903 return; 893 return;
@@ -1141,37 +1131,34 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
1141void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) 1131void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1142{ 1132{
1143 struct platform_device *pdev; 1133 struct platform_device *pdev;
1134 struct atmel_uart_data *pdata;
1144 1135
1145 switch (id) { 1136 switch (id) {
1146 case 0: /* DBGU */ 1137 case 0: /* DBGU */
1147 pdev = &at91sam9rl_dbgu_device; 1138 pdev = &at91sam9rl_dbgu_device;
1148 configure_dbgu_pins(); 1139 configure_dbgu_pins();
1149 at91_clock_associate("mck", &pdev->dev, "usart");
1150 break; 1140 break;
1151 case AT91SAM9RL_ID_US0: 1141 case AT91SAM9RL_ID_US0:
1152 pdev = &at91sam9rl_uart0_device; 1142 pdev = &at91sam9rl_uart0_device;
1153 configure_usart0_pins(pins); 1143 configure_usart0_pins(pins);
1154 at91_clock_associate("usart0_clk", &pdev->dev, "usart");
1155 break; 1144 break;
1156 case AT91SAM9RL_ID_US1: 1145 case AT91SAM9RL_ID_US1:
1157 pdev = &at91sam9rl_uart1_device; 1146 pdev = &at91sam9rl_uart1_device;
1158 configure_usart1_pins(pins); 1147 configure_usart1_pins(pins);
1159 at91_clock_associate("usart1_clk", &pdev->dev, "usart");
1160 break; 1148 break;
1161 case AT91SAM9RL_ID_US2: 1149 case AT91SAM9RL_ID_US2:
1162 pdev = &at91sam9rl_uart2_device; 1150 pdev = &at91sam9rl_uart2_device;
1163 configure_usart2_pins(pins); 1151 configure_usart2_pins(pins);
1164 at91_clock_associate("usart2_clk", &pdev->dev, "usart");
1165 break; 1152 break;
1166 case AT91SAM9RL_ID_US3: 1153 case AT91SAM9RL_ID_US3:
1167 pdev = &at91sam9rl_uart3_device; 1154 pdev = &at91sam9rl_uart3_device;
1168 configure_usart3_pins(pins); 1155 configure_usart3_pins(pins);
1169 at91_clock_associate("usart3_clk", &pdev->dev, "usart");
1170 break; 1156 break;
1171 default: 1157 default:
1172 return; 1158 return;
1173 } 1159 }
1174 pdev->id = portnr; /* update to mapped ID */ 1160 pdata = pdev->dev.platform_data;
1161 pdata->num = portnr; /* update to mapped ID */
1175 1162
1176 if (portnr < ATMEL_MAX_UART) 1163 if (portnr < ATMEL_MAX_UART)
1177 at91_uarts[portnr] = pdev; 1164 at91_uarts[portnr] = pdev;
@@ -1179,8 +1166,10 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
1179 1166
1180void __init at91_set_serial_console(unsigned portnr) 1167void __init at91_set_serial_console(unsigned portnr)
1181{ 1168{
1182 if (portnr < ATMEL_MAX_UART) 1169 if (portnr < ATMEL_MAX_UART) {
1183 atmel_default_console_device = at91_uarts[portnr]; 1170 atmel_default_console_device = at91_uarts[portnr];
1171 at91sam9rl_set_console_clock(portnr);
1172 }
1184} 1173}
1185 1174
1186void __init at91_add_device_serial(void) 1175void __init at91_add_device_serial(void)
diff --git a/arch/arm/mach-at91/at91x40.c b/arch/arm/mach-at91/at91x40.c
index ad3ec85b2790..56ba3bd035ae 100644
--- a/arch/arm/mach-at91/at91x40.c
+++ b/arch/arm/mach-at91/at91x40.c
@@ -37,11 +37,6 @@ unsigned long clk_get_rate(struct clk *clk)
37 return AT91X40_MASTER_CLOCK; 37 return AT91X40_MASTER_CLOCK;
38} 38}
39 39
40struct clk *clk_get(struct device *dev, const char *id)
41{
42 return NULL;
43}
44
45void __init at91x40_initialize(unsigned long main_clock) 40void __init at91x40_initialize(unsigned long main_clock)
46{ 41{
47 at91_extern_irq = (1 << AT91X40_ID_IRQ0) | (1 << AT91X40_ID_IRQ1) 42 at91_extern_irq = (1 << AT91X40_ID_IRQ0) | (1 << AT91X40_ID_IRQ1)
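at91x40 can delete its private clk_get() stub because clkdev now supplies the generic clk_get()/clk_put() for the whole platform. Conceptually the generic version is little more than a table search keyed on device name and connection id; the snippet below is a simplified sketch, not the actual drivers/clk/clkdev.c implementation:

	#include <linux/clk.h>
	#include <linux/clkdev.h>
	#include <linux/device.h>

	struct clk *clk_get(struct device *dev, const char *con_id)
	{
		const char *dev_id = dev ? dev_name(dev) : NULL;

		/* clk_get_sys() walks the registered clk_lookup entries and
		 * returns the best match on (dev_id, con_id). */
		return clk_get_sys(dev_id, con_id);
	}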
diff --git a/arch/arm/mach-at91/board-1arm.c b/arch/arm/mach-at91/board-1arm.c
index 8a3fc84847c1..ab1d463aa47d 100644
--- a/arch/arm/mach-at91/board-1arm.c
+++ b/arch/arm/mach-at91/board-1arm.c
@@ -35,14 +35,18 @@
35 35
36#include <mach/board.h> 36#include <mach/board.h>
37#include <mach/gpio.h> 37#include <mach/gpio.h>
38#include <mach/cpu.h>
38 39
39#include "generic.h" 40#include "generic.h"
40 41
41 42
42static void __init onearm_map_io(void) 43static void __init onearm_init_early(void)
43{ 44{
45 /* Set cpu type: PQFP */
46 at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
47
44 /* Initialize processor: 18.432 MHz crystal */ 48 /* Initialize processor: 18.432 MHz crystal */
45 at91rm9200_initialize(18432000, AT91RM9200_PQFP); 49 at91rm9200_initialize(18432000);
46 50
47 /* DBGU on ttyS0. (Rx & Tx only) */ 51 /* DBGU on ttyS0. (Rx & Tx only) */
48 at91_register_uart(0, 0, 0); 52 at91_register_uart(0, 0, 0);
@@ -92,9 +96,9 @@ static void __init onearm_board_init(void)
92 96
93MACHINE_START(ONEARM, "Ajeco 1ARM single board computer") 97MACHINE_START(ONEARM, "Ajeco 1ARM single board computer")
94 /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */ 98 /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
95 .boot_params = AT91_SDRAM_BASE + 0x100,
96 .timer = &at91rm9200_timer, 99 .timer = &at91rm9200_timer,
97 .map_io = onearm_map_io, 100 .map_io = at91rm9200_map_io,
101 .init_early = onearm_init_early,
98 .init_irq = onearm_init_irq, 102 .init_irq = onearm_init_irq,
99 .init_machine = onearm_board_init, 103 .init_machine = onearm_board_init,
100MACHINE_END 104MACHINE_END
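The board files follow the same pattern as this 1ARM conversion: the old <board>_map_io() is renamed <board>_init_early(), .map_io now points at the SoC-wide map_io, the per-board .boot_params line is dropped (presumably in favour of a platform-wide default), and on RM9200 the package variant moves out of the initialize() argument into an explicit call. The declarations below sketch the assumed helpers behind that last change; the misspelt ARCH_REVISON_* constant is copied verbatim from the hunk above:

	#include <mach/cpu.h>		/* assumed home of the package-type helpers */

	/* Must run before at91rm9200_initialize(): the package determines,
	 * among other things, how many PIO banks are available (the PQFP
	 * part lacks the fourth bank present on the BGA part). */
	extern void __init at91rm9200_set_type(int type);	/* ARCH_REVISON_9200_PQFP or _BGA */
	extern void __init at91rm9200_initialize(unsigned long main_clock);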
diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c
index cba7f7771fee..a4924de48c36 100644
--- a/arch/arm/mach-at91/board-afeb-9260v1.c
+++ b/arch/arm/mach-at91/board-afeb-9260v1.c
@@ -48,7 +48,7 @@
48#include "generic.h" 48#include "generic.h"
49 49
50 50
51static void __init afeb9260_map_io(void) 51static void __init afeb9260_init_early(void)
52{ 52{
53 /* Initialize processor: 18.432 MHz crystal */ 53 /* Initialize processor: 18.432 MHz crystal */
54 at91sam9260_initialize(18432000); 54 at91sam9260_initialize(18432000);
@@ -218,9 +218,9 @@ static void __init afeb9260_board_init(void)
218 218
219MACHINE_START(AFEB9260, "Custom afeb9260 board") 219MACHINE_START(AFEB9260, "Custom afeb9260 board")
220 /* Maintainer: Sergey Lapin <slapin@ossfans.org> */ 220 /* Maintainer: Sergey Lapin <slapin@ossfans.org> */
221 .boot_params = AT91_SDRAM_BASE + 0x100,
222 .timer = &at91sam926x_timer, 221 .timer = &at91sam926x_timer,
223 .map_io = afeb9260_map_io, 222 .map_io = at91sam9260_map_io,
223 .init_early = afeb9260_init_early,
224 .init_irq = afeb9260_init_irq, 224 .init_irq = afeb9260_init_irq,
225 .init_machine = afeb9260_board_init, 225 .init_machine = afeb9260_board_init,
226MACHINE_END 226MACHINE_END
diff --git a/arch/arm/mach-at91/board-at572d940hf_ek.c b/arch/arm/mach-at91/board-at572d940hf_ek.c
deleted file mode 100644
index 3929f1c9e4e5..000000000000
--- a/arch/arm/mach-at91/board-at572d940hf_ek.c
+++ /dev/null
@@ -1,326 +0,0 @@
1/*
2 * linux/arch/arm/mach-at91/board-at572d940hf_ek.c
3 *
4 * Copyright (C) 2008 Atmel Antonio R. Costa <costa.antonior@gmail.com>
5 * Copyright (C) 2005 SAN People
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27#include <linux/spi/spi.h>
28#include <linux/spi/ds1305.h>
29#include <linux/irq.h>
30#include <linux/mtd/physmap.h>
31
32#include <mach/hardware.h>
33#include <asm/setup.h>
34#include <asm/mach-types.h>
35#include <asm/irq.h>
36
37#include <asm/mach/arch.h>
38#include <asm/mach/map.h>
39#include <asm/mach/irq.h>
40
41#include <mach/board.h>
42#include <mach/gpio.h>
43#include <mach/at91sam9_smc.h>
44
45#include "sam9_smc.h"
46#include "generic.h"
47
48
49static void __init eb_map_io(void)
50{
51 /* Initialize processor: 12.500 MHz crystal */
52 at572d940hf_initialize(12000000);
53
54 /* DBGU on ttyS0. (Rx & Tx only) */
55 at91_register_uart(0, 0, 0);
56
57 /* USART0 on ttyS1. (Rx & Tx only) */
58 at91_register_uart(AT572D940HF_ID_US0, 1, 0);
59
60 /* USART1 on ttyS2. (Rx & Tx only) */
61 at91_register_uart(AT572D940HF_ID_US1, 2, 0);
62
 63 * USART2 on ttyS3. (Tx & Rx only) */
64 at91_register_uart(AT572D940HF_ID_US2, 3, 0);
65
66 /* set serial console to ttyS0 (ie, DBGU) */
67 at91_set_serial_console(0);
68}
69
70static void __init eb_init_irq(void)
71{
72 at572d940hf_init_interrupts(NULL);
73}
74
75
76/*
77 * USB Host Port
78 */
79static struct at91_usbh_data __initdata eb_usbh_data = {
80 .ports = 2,
81};
82
83
84/*
85 * USB Device Port
86 */
87static struct at91_udc_data __initdata eb_udc_data = {
88 .vbus_pin = 0, /* no VBUS detection,UDC always on */
89 .pullup_pin = 0, /* pull-up driven by UDC */
90};
91
92
93/*
94 * MCI (SD/MMC)
95 */
96static struct at91_mmc_data __initdata eb_mmc_data = {
97 .wire4 = 1,
98/* .det_pin = ... not connected */
99/* .wp_pin = ... not connected */
100/* .vcc_pin = ... not connected */
101};
102
103
104/*
105 * MACB Ethernet device
106 */
107static struct at91_eth_data __initdata eb_eth_data = {
108 .phy_irq_pin = AT91_PIN_PB25,
109 .is_rmii = 1,
110};
111
112/*
113 * NOR flash
114 */
115
116static struct mtd_partition eb_nor_partitions[] = {
117 {
118 .name = "Raw Environment",
119 .offset = 0,
120 .size = SZ_4M,
121 .mask_flags = 0,
122 },
123 {
124 .name = "OS FS",
125 .offset = MTDPART_OFS_APPEND,
126 .size = 3 * SZ_1M,
127 .mask_flags = 0,
128 },
129 {
130 .name = "APP FS",
131 .offset = MTDPART_OFS_APPEND,
132 .size = MTDPART_SIZ_FULL,
133 .mask_flags = 0,
134 },
135};
136
137static void nor_flash_set_vpp(struct map_info* mi, int i) {
138};
139
140static struct physmap_flash_data nor_flash_data = {
141 .width = 4,
142 .parts = eb_nor_partitions,
143 .nr_parts = ARRAY_SIZE(eb_nor_partitions),
144 .set_vpp = nor_flash_set_vpp,
145};
146
147static struct resource nor_flash_resources[] = {
148 {
149 .start = AT91_CHIPSELECT_0,
150 .end = AT91_CHIPSELECT_0 + SZ_16M - 1,
151 .flags = IORESOURCE_MEM,
152 },
153};
154
155static struct platform_device nor_flash = {
156 .name = "physmap-flash",
157 .id = 0,
158 .dev = {
159 .platform_data = &nor_flash_data,
160 },
161 .resource = nor_flash_resources,
162 .num_resources = ARRAY_SIZE(nor_flash_resources),
163};
164
165static struct sam9_smc_config __initdata eb_nor_smc_config = {
166 .ncs_read_setup = 1,
167 .nrd_setup = 1,
168 .ncs_write_setup = 1,
169 .nwe_setup = 1,
170
171 .ncs_read_pulse = 7,
172 .nrd_pulse = 7,
173 .ncs_write_pulse = 7,
174 .nwe_pulse = 7,
175
176 .read_cycle = 9,
177 .write_cycle = 9,
178
179 .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_WRITE | AT91_SMC_DBW_32,
180 .tdf_cycles = 1,
181};
182
183static void __init eb_add_device_nor(void)
184{
185 /* configure chip-select 0 (NOR) */
186 sam9_smc_configure(0, &eb_nor_smc_config);
187 platform_device_register(&nor_flash);
188}
189
190/*
191 * NAND flash
192 */
193static struct mtd_partition __initdata eb_nand_partition[] = {
194 {
195 .name = "Partition 1",
196 .offset = 0,
197 .size = SZ_16M,
198 },
199 {
200 .name = "Partition 2",
201 .offset = MTDPART_OFS_NXTBLK,
202 .size = MTDPART_SIZ_FULL,
203 }
204};
205
206static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
207{
208 *num_partitions = ARRAY_SIZE(eb_nand_partition);
209 return eb_nand_partition;
210}
211
212static struct atmel_nand_data __initdata eb_nand_data = {
213 .ale = 22,
214 .cle = 21,
215/* .det_pin = ... not connected */
216/* .rdy_pin = AT91_PIN_PC16, */
217 .enable_pin = AT91_PIN_PA15,
218 .partition_info = nand_partitions,
219#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
220 .bus_width_16 = 1,
221#else
222 .bus_width_16 = 0,
223#endif
224};
225
226static struct sam9_smc_config __initdata eb_nand_smc_config = {
227 .ncs_read_setup = 0,
228 .nrd_setup = 0,
229 .ncs_write_setup = 1,
230 .nwe_setup = 1,
231
232 .ncs_read_pulse = 3,
233 .nrd_pulse = 3,
234 .ncs_write_pulse = 3,
235 .nwe_pulse = 3,
236
237 .read_cycle = 5,
238 .write_cycle = 5,
239
240 .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE,
241 .tdf_cycles = 12,
242};
243
244static void __init eb_add_device_nand(void)
245{
246 /* setup bus-width (8 or 16) */
247 if (eb_nand_data.bus_width_16)
248 eb_nand_smc_config.mode |= AT91_SMC_DBW_16;
249 else
250 eb_nand_smc_config.mode |= AT91_SMC_DBW_8;
251
252 /* configure chip-select 3 (NAND) */
253 sam9_smc_configure(3, &eb_nand_smc_config);
254
255 at91_add_device_nand(&eb_nand_data);
256}
257
258
259/*
260 * SPI devices
261 */
262static struct resource rtc_resources[] = {
263 [0] = {
264 .start = AT572D940HF_ID_IRQ1,
265 .end = AT572D940HF_ID_IRQ1,
266 .flags = IORESOURCE_IRQ,
267 },
268};
269
270static struct ds1305_platform_data ds1306_data = {
271 .is_ds1306 = true,
272 .en_1hz = false,
273};
274
275static struct spi_board_info eb_spi_devices[] = {
276 { /* RTC Dallas DS1306 */
277 .modalias = "rtc-ds1305",
278 .chip_select = 3,
279 .mode = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA,
280 .max_speed_hz = 500000,
281 .bus_num = 0,
282 .irq = AT572D940HF_ID_IRQ1,
283 .platform_data = (void *) &ds1306_data,
284 },
285#if defined(CONFIG_MTD_AT91_DATAFLASH_CARD)
286 { /* Dataflash card */
287 .modalias = "mtd_dataflash",
288 .chip_select = 0,
289 .max_speed_hz = 15 * 1000 * 1000,
290 .bus_num = 0,
291 },
292#endif
293};
294
295static void __init eb_board_init(void)
296{
297 /* Serial */
298 at91_add_device_serial();
299 /* USB Host */
300 at91_add_device_usbh(&eb_usbh_data);
301 /* USB Device */
302 at91_add_device_udc(&eb_udc_data);
303 /* I2C */
304 at91_add_device_i2c(NULL, 0);
305 /* NOR */
306 eb_add_device_nor();
307 /* NAND */
308 eb_add_device_nand();
309 /* SPI */
310 at91_add_device_spi(eb_spi_devices, ARRAY_SIZE(eb_spi_devices));
311 /* MMC */
312 at91_add_device_mmc(0, &eb_mmc_data);
313 /* Ethernet */
314 at91_add_device_eth(&eb_eth_data);
315 /* mAgic */
316 at91_add_device_mAgic();
317}
318
319MACHINE_START(AT572D940HFEB, "Atmel AT91D940HF-EB")
320 /* Maintainer: Atmel <costa.antonior@gmail.com> */
321 .boot_params = AT91_SDRAM_BASE + 0x100,
322 .timer = &at91sam926x_timer,
323 .map_io = eb_map_io,
324 .init_irq = eb_init_irq,
325 .init_machine = eb_board_init,
326MACHINE_END
diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c
index b54e3e6fceb6..148fccb9a25a 100644
--- a/arch/arm/mach-at91/board-cam60.c
+++ b/arch/arm/mach-at91/board-cam60.c
@@ -45,7 +45,7 @@
 #include "generic.h"
 
 
-static void __init cam60_map_io(void)
+static void __init cam60_init_early(void)
 {
 	/* Initialize processor: 10 MHz crystal */
 	at91sam9260_initialize(10000000);
@@ -198,9 +198,9 @@ static void __init cam60_board_init(void)
 
 MACHINE_START(CAM60, "KwikByte CAM60")
 	/* Maintainer: KwikByte */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = cam60_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = cam60_init_early,
 	.init_irq = cam60_init_irq,
 	.init_machine = cam60_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c
index e7274440ead9..1904fdf87613 100644
--- a/arch/arm/mach-at91/board-cap9adk.c
+++ b/arch/arm/mach-at91/board-cap9adk.c
@@ -44,12 +44,13 @@
 #include <mach/gpio.h>
 #include <mach/at91cap9_matrix.h>
 #include <mach/at91sam9_smc.h>
+#include <mach/system_rev.h>
 
 #include "sam9_smc.h"
 #include "generic.h"
 
 
-static void __init cap9adk_map_io(void)
+static void __init cap9adk_init_early(void)
 {
 	/* Initialize processor: 12 MHz crystal */
 	at91cap9_initialize(12000000);
@@ -187,11 +188,6 @@ static struct atmel_nand_data __initdata cap9adk_nand_data = {
 //	.rdy_pin = ... not connected
 	.enable_pin = AT91_PIN_PD15,
 	.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
-	.bus_width_16 = 1,
-#else
-	.bus_width_16 = 0,
-#endif
 };
 
 static struct sam9_smc_config __initdata cap9adk_nand_smc_config = {
@@ -219,6 +215,7 @@ static void __init cap9adk_add_device_nand(void)
 	csa = at91_sys_read(AT91_MATRIX_EBICSA);
 	at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V);
 
+	cap9adk_nand_data.bus_width_16 = !board_have_nand_8bit();
 	/* setup bus-width (8 or 16) */
 	if (cap9adk_nand_data.bus_width_16)
 		cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -399,9 +396,9 @@ static void __init cap9adk_board_init(void)
 
 MACHINE_START(AT91CAP9ADK, "Atmel AT91CAP9A-DK")
 	/* Maintainer: Stelian Pop <stelian.pop@leadtechdesign.com> */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = cap9adk_map_io,
+	.map_io = at91cap9_map_io,
+	.init_early = cap9adk_init_early,
 	.init_irq = cap9adk_init_irq,
 	.init_machine = cap9adk_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-carmeva.c b/arch/arm/mach-at91/board-carmeva.c
index 295e1e77fa60..f36b18687494 100644
--- a/arch/arm/mach-at91/board-carmeva.c
+++ b/arch/arm/mach-at91/board-carmeva.c
@@ -40,10 +40,10 @@
 #include "generic.h"
 
 
-static void __init carmeva_map_io(void)
+static void __init carmeva_init_early(void)
 {
 	/* Initialize processor: 20.000 MHz crystal */
-	at91rm9200_initialize(20000000, AT91RM9200_BGA);
+	at91rm9200_initialize(20000000);
 
 	/* DBGU on ttyS0. (Rx & Tx only) */
 	at91_register_uart(0, 0, 0);
@@ -162,9 +162,9 @@ static void __init carmeva_board_init(void)
 
 MACHINE_START(CARMEVA, "Carmeva")
 	/* Maintainer: Conitec Datasystems */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = carmeva_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = carmeva_init_early,
 	.init_irq = carmeva_init_irq,
 	.init_machine = carmeva_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-cpu9krea.c b/arch/arm/mach-at91/board-cpu9krea.c
index 3838594578f3..980511084fe4 100644
--- a/arch/arm/mach-at91/board-cpu9krea.c
+++ b/arch/arm/mach-at91/board-cpu9krea.c
@@ -47,7 +47,7 @@
 #include "sam9_smc.h"
 #include "generic.h"
 
-static void __init cpu9krea_map_io(void)
+static void __init cpu9krea_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	at91sam9260_initialize(18432000);
@@ -375,9 +375,9 @@ MACHINE_START(CPUAT9260, "Eukrea CPU9260")
 MACHINE_START(CPUAT9G20, "Eukrea CPU9G20")
 #endif
 	/* Maintainer: Eric Benard - EUKREA Electromatique */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = cpu9krea_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = cpu9krea_init_early,
 	.init_irq = cpu9krea_init_irq,
 	.init_machine = cpu9krea_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-cpuat91.c b/arch/arm/mach-at91/board-cpuat91.c
index 2f4dd8cdd484..6daabe3907a1 100644
--- a/arch/arm/mach-at91/board-cpuat91.c
+++ b/arch/arm/mach-at91/board-cpuat91.c
@@ -38,6 +38,7 @@
 #include <mach/board.h>
 #include <mach/gpio.h>
 #include <mach/at91rm9200_mc.h>
+#include <mach/cpu.h>
 
 #include "generic.h"
 
@@ -50,10 +51,13 @@ static struct gpio_led cpuat91_leds[] = {
 	},
 };
 
-static void __init cpuat91_map_io(void)
+static void __init cpuat91_init_early(void)
 {
+	/* Set cpu type: PQFP */
+	at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
 	/* Initialize processor: 18.432 MHz crystal */
-	at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+	at91rm9200_initialize(18432000);
 
 	/* DBGU on ttyS0. (Rx & Tx only) */
 	at91_register_uart(0, 0, 0);
@@ -175,9 +179,9 @@ static void __init cpuat91_board_init(void)
 
 MACHINE_START(CPUAT91, "Eukrea")
 	/* Maintainer: Eric Benard - EUKREA Electromatique */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = cpuat91_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = cpuat91_init_early,
 	.init_irq = cpuat91_init_irq,
 	.init_machine = cpuat91_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-csb337.c b/arch/arm/mach-at91/board-csb337.c
index 464839dc39bd..d98bcec1dfe0 100644
--- a/arch/arm/mach-at91/board-csb337.c
+++ b/arch/arm/mach-at91/board-csb337.c
@@ -43,10 +43,10 @@
 #include "generic.h"
 
 
-static void __init csb337_map_io(void)
+static void __init csb337_init_early(void)
 {
 	/* Initialize processor: 3.6864 MHz crystal */
-	at91rm9200_initialize(3686400, AT91RM9200_BGA);
+	at91rm9200_initialize(3686400);
 
 	/* Setup the LEDs */
 	at91_init_leds(AT91_PIN_PB0, AT91_PIN_PB1);
@@ -257,9 +257,9 @@ static void __init csb337_board_init(void)
 
 MACHINE_START(CSB337, "Cogent CSB337")
 	/* Maintainer: Bill Gatliff */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = csb337_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = csb337_init_early,
 	.init_irq = csb337_init_irq,
 	.init_machine = csb337_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-csb637.c b/arch/arm/mach-at91/board-csb637.c
index 431688c61412..019aab4e20b0 100644
--- a/arch/arm/mach-at91/board-csb637.c
+++ b/arch/arm/mach-at91/board-csb637.c
@@ -40,10 +40,10 @@
 #include "generic.h"
 
 
-static void __init csb637_map_io(void)
+static void __init csb637_init_early(void)
 {
 	/* Initialize processor: 3.6864 MHz crystal */
-	at91rm9200_initialize(3686400, AT91RM9200_BGA);
+	at91rm9200_initialize(3686400);
 
 	/* DBGU on ttyS0. (Rx & Tx only) */
 	at91_register_uart(0, 0, 0);
@@ -138,9 +138,9 @@ static void __init csb637_board_init(void)
 
 MACHINE_START(CSB637, "Cogent CSB637")
 	/* Maintainer: Bill Gatliff */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = csb637_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = csb637_init_early,
 	.init_irq = csb637_init_irq,
 	.init_machine = csb637_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-eb01.c b/arch/arm/mach-at91/board-eb01.c
index d8df59a3426d..d2023f27c652 100644
--- a/arch/arm/mach-at91/board-eb01.c
+++ b/arch/arm/mach-at91/board-eb01.c
@@ -35,7 +35,7 @@ static void __init at91eb01_init_irq(void)
 	at91x40_init_interrupts(NULL);
 }
 
-static void __init at91eb01_map_io(void)
+static void __init at91eb01_init_early(void)
 {
 	at91x40_initialize(40000000);
 }
@@ -43,7 +43,7 @@ static void __init at91eb01_map_io(void)
 MACHINE_START(AT91EB01, "Atmel AT91 EB01")
 	/* Maintainer: Greg Ungerer <gerg@snapgear.com> */
 	.timer = &at91x40_timer,
+	.init_early = at91eb01_init_early,
 	.init_irq = at91eb01_init_irq,
-	.map_io = at91eb01_map_io,
 MACHINE_END
 
diff --git a/arch/arm/mach-at91/board-eb9200.c b/arch/arm/mach-at91/board-eb9200.c
index 6cf6566ae346..e9484535cbc8 100644
--- a/arch/arm/mach-at91/board-eb9200.c
+++ b/arch/arm/mach-at91/board-eb9200.c
@@ -40,10 +40,10 @@
 #include "generic.h"
 
 
-static void __init eb9200_map_io(void)
+static void __init eb9200_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
-	at91rm9200_initialize(18432000, AT91RM9200_BGA);
+	at91rm9200_initialize(18432000);
 
 	/* DBGU on ttyS0. (Rx & Tx only) */
 	at91_register_uart(0, 0, 0);
@@ -120,9 +120,9 @@ static void __init eb9200_board_init(void)
 }
 
 MACHINE_START(ATEB9200, "Embest ATEB9200")
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = eb9200_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = eb9200_init_early,
 	.init_irq = eb9200_init_irq,
 	.init_machine = eb9200_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-ecbat91.c b/arch/arm/mach-at91/board-ecbat91.c
index de2fd04e7c8a..a6f57faa10a7 100644
--- a/arch/arm/mach-at91/board-ecbat91.c
+++ b/arch/arm/mach-at91/board-ecbat91.c
@@ -38,14 +38,18 @@
 
 #include <mach/board.h>
 #include <mach/gpio.h>
+#include <mach/cpu.h>
 
 #include "generic.h"
 
 
-static void __init ecb_at91map_io(void)
+static void __init ecb_at91init_early(void)
 {
+	/* Set cpu type: PQFP */
+	at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
 	/* Initialize processor: 18.432 MHz crystal */
-	at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+	at91rm9200_initialize(18432000);
 
 	/* Setup the LEDs */
 	at91_init_leds(AT91_PIN_PC7, AT91_PIN_PC7);
@@ -168,9 +172,9 @@ static void __init ecb_at91board_init(void)
 
 MACHINE_START(ECBAT91, "emQbit's ECB_AT91")
 	/* Maintainer: emQbit.com */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = ecb_at91map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = ecb_at91init_early,
 	.init_irq = ecb_at91init_irq,
 	.init_machine = ecb_at91board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-eco920.c b/arch/arm/mach-at91/board-eco920.c
index a158a0ce458f..bfc0062d1483 100644
--- a/arch/arm/mach-at91/board-eco920.c
+++ b/arch/arm/mach-at91/board-eco920.c
@@ -26,11 +26,16 @@
 
 #include <mach/board.h>
 #include <mach/at91rm9200_mc.h>
+#include <mach/cpu.h>
+
 #include "generic.h"
 
-static void __init eco920_map_io(void)
+static void __init eco920_init_early(void)
 {
-	at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+	/* Set cpu type: PQFP */
+	at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
+	at91rm9200_initialize(18432000);
 
 	/* Setup the LEDs */
 	at91_init_leds(AT91_PIN_PB0, AT91_PIN_PB1);
@@ -86,21 +91,6 @@ static struct platform_device eco920_flash = {
 	.num_resources = 1,
 };
 
-static struct resource at91_beeper_resources[] = {
-	[0] = {
-		.start = AT91RM9200_BASE_TC3,
-		.end = AT91RM9200_BASE_TC3 + 0x39,
-		.flags = IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device at91_beeper = {
-	.name = "at91_beeper",
-	.id = 0,
-	.resource = at91_beeper_resources,
-	.num_resources = ARRAY_SIZE(at91_beeper_resources),
-};
-
 static struct spi_board_info eco920_spi_devices[] = {
 	{ /* CAN controller */
 		.modalias = "tlv5638",
@@ -139,18 +129,14 @@ static void __init eco920_board_init(void)
 		AT91_SMC_TDF_(1)	/* float time */
 	);
 
-	at91_clock_associate("tc3_clk", &at91_beeper.dev, "at91_beeper");
-	at91_set_B_periph(AT91_PIN_PB6, 0);
-	platform_device_register(&at91_beeper);
-
 	at91_add_device_spi(eco920_spi_devices, ARRAY_SIZE(eco920_spi_devices));
 }
 
 MACHINE_START(ECO920, "eco920")
 	/* Maintainer: Sascha Hauer */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = eco920_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = eco920_init_early,
 	.init_irq = eco920_init_irq,
 	.init_machine = eco920_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-flexibity.c b/arch/arm/mach-at91/board-flexibity.c
index c8a62dc8fa65..466c063b8d21 100644
--- a/arch/arm/mach-at91/board-flexibity.c
+++ b/arch/arm/mach-at91/board-flexibity.c
@@ -37,7 +37,7 @@
 
 #include "generic.h"
 
-static void __init flexibity_map_io(void)
+static void __init flexibity_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	at91sam9260_initialize(18432000);
@@ -154,9 +154,9 @@ static void __init flexibity_board_init(void)
 
 MACHINE_START(FLEXIBITY, "Flexibity Connect")
 	/* Maintainer: Maxim Osipov */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = flexibity_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = flexibity_init_early,
 	.init_irq = flexibity_init_irq,
 	.init_machine = flexibity_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
index dfc7dfe738e4..e2d1dc9eff45 100644
--- a/arch/arm/mach-at91/board-foxg20.c
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -57,7 +57,7 @@
  */
 
 
-static void __init foxg20_map_io(void)
+static void __init foxg20_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	at91sam9260_initialize(18432000);
@@ -266,9 +266,9 @@ static void __init foxg20_board_init(void)
 
 MACHINE_START(ACMENETUSFOXG20, "Acme Systems srl FOX Board G20")
 	/* Maintainer: Sergio Tanzilli */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = foxg20_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = foxg20_init_early,
 	.init_irq = foxg20_init_irq,
 	.init_machine = foxg20_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-gsia18s.c b/arch/arm/mach-at91/board-gsia18s.c
index bc28136ee249..1d4f36b3cb27 100644
--- a/arch/arm/mach-at91/board-gsia18s.c
+++ b/arch/arm/mach-at91/board-gsia18s.c
@@ -38,9 +38,9 @@
 #include "sam9_smc.h"
 #include "generic.h"
 
-static void __init gsia18s_map_io(void)
+static void __init gsia18s_init_early(void)
 {
-	stamp9g20_map_io();
+	stamp9g20_init_early();
 
 	/*
 	 * USART0 on ttyS1 (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI).
@@ -576,9 +576,9 @@ static void __init gsia18s_board_init(void)
 }
 
 MACHINE_START(GSIA18S, "GS_IA18_S")
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = gsia18s_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = gsia18s_init_early,
 	.init_irq = init_irq,
 	.init_machine = gsia18s_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-kafa.c b/arch/arm/mach-at91/board-kafa.c
index d2e1f4ec1fcc..9b003ff744ba 100644
--- a/arch/arm/mach-at91/board-kafa.c
+++ b/arch/arm/mach-at91/board-kafa.c
@@ -35,14 +35,18 @@
 
 #include <mach/board.h>
 #include <mach/gpio.h>
+#include <mach/cpu.h>
 
 #include "generic.h"
 
 
-static void __init kafa_map_io(void)
+static void __init kafa_init_early(void)
 {
+	/* Set cpu type: PQFP */
+	at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
 	/* Initialize processor: 18.432 MHz crystal */
-	at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+	at91rm9200_initialize(18432000);
 
 	/* Set up the LEDs */
 	at91_init_leds(AT91_PIN_PB4, AT91_PIN_PB4);
@@ -94,9 +98,9 @@ static void __init kafa_board_init(void)
 
 MACHINE_START(KAFA, "Sperry-Sun KAFA")
 	/* Maintainer: Sergei Sharonov */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = kafa_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = kafa_init_early,
 	.init_irq = kafa_init_irq,
 	.init_machine = kafa_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-kb9202.c b/arch/arm/mach-at91/board-kb9202.c
index a13d2063faff..a813a74b65f9 100644
--- a/arch/arm/mach-at91/board-kb9202.c
+++ b/arch/arm/mach-at91/board-kb9202.c
@@ -36,16 +36,19 @@
 
 #include <mach/board.h>
 #include <mach/gpio.h>
-
+#include <mach/cpu.h>
 #include <mach/at91rm9200_mc.h>
 
 #include "generic.h"
 
 
-static void __init kb9202_map_io(void)
+static void __init kb9202_init_early(void)
 {
+	/* Set cpu type: PQFP */
+	at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
 	/* Initialize processor: 10 MHz crystal */
-	at91rm9200_initialize(10000000, AT91RM9200_PQFP);
+	at91rm9200_initialize(10000000);
 
 	/* Set up the LEDs */
 	at91_init_leds(AT91_PIN_PC19, AT91_PIN_PC18);
@@ -136,9 +139,9 @@ static void __init kb9202_board_init(void)
 
 MACHINE_START(KB9200, "KB920x")
 	/* Maintainer: KwikByte, Inc. */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = kb9202_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = kb9202_init_early,
 	.init_irq = kb9202_init_irq,
 	.init_machine = kb9202_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-neocore926.c b/arch/arm/mach-at91/board-neocore926.c
index fe5f1d47e6e2..961e805db68c 100644
--- a/arch/arm/mach-at91/board-neocore926.c
+++ b/arch/arm/mach-at91/board-neocore926.c
@@ -51,7 +51,7 @@
 #include "generic.h"
 
 
-static void __init neocore926_map_io(void)
+static void __init neocore926_init_early(void)
 {
 	/* Initialize processor: 20 MHz crystal */
 	at91sam9263_initialize(20000000);
@@ -387,9 +387,9 @@ static void __init neocore926_board_init(void)
 
 MACHINE_START(NEOCORE926, "ADENEO NEOCORE 926")
 	/* Maintainer: ADENEO */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = neocore926_map_io,
+	.map_io = at91sam9263_map_io,
+	.init_early = neocore926_init_early,
 	.init_irq = neocore926_init_irq,
 	.init_machine = neocore926_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-pcontrol-g20.c b/arch/arm/mach-at91/board-pcontrol-g20.c
index feb65787c30b..21a21af25878 100644
--- a/arch/arm/mach-at91/board-pcontrol-g20.c
+++ b/arch/arm/mach-at91/board-pcontrol-g20.c
@@ -37,9 +37,9 @@
 #include "generic.h"
 
 
-static void __init pcontrol_g20_map_io(void)
+static void __init pcontrol_g20_init_early(void)
 {
-	stamp9g20_map_io();
+	stamp9g20_init_early();
 
 	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS) piggyback A2 */
 	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS
@@ -222,9 +222,9 @@ static void __init pcontrol_g20_board_init(void)
 
 MACHINE_START(PCONTROL_G20, "PControl G20")
 	/* Maintainer: pgsellmann@portner-elektronik.at */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = pcontrol_g20_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = pcontrol_g20_init_early,
 	.init_irq = init_irq,
 	.init_machine = pcontrol_g20_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-picotux200.c b/arch/arm/mach-at91/board-picotux200.c
index 55dad3a46547..756cc2a745dd 100644
--- a/arch/arm/mach-at91/board-picotux200.c
+++ b/arch/arm/mach-at91/board-picotux200.c
@@ -43,10 +43,10 @@
 #include "generic.h"
 
 
-static void __init picotux200_map_io(void)
+static void __init picotux200_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
-	at91rm9200_initialize(18432000, AT91RM9200_BGA);
+	at91rm9200_initialize(18432000);
 
 	/* DBGU on ttyS0. (Rx & Tx only) */
 	at91_register_uart(0, 0, 0);
@@ -123,9 +123,9 @@ static void __init picotux200_board_init(void)
 
 MACHINE_START(PICOTUX2XX, "picotux 200")
 	/* Maintainer: Kleinhenz Elektronik GmbH */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = picotux200_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = picotux200_init_early,
 	.init_irq = picotux200_init_irq,
 	.init_machine = picotux200_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-qil-a9260.c b/arch/arm/mach-at91/board-qil-a9260.c
index 69d15a875b66..d1a6001b0bd8 100644
--- a/arch/arm/mach-at91/board-qil-a9260.c
+++ b/arch/arm/mach-at91/board-qil-a9260.c
@@ -48,7 +48,7 @@
 #include "generic.h"
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 12.000 MHz crystal */
 	at91sam9260_initialize(12000000);
@@ -268,9 +268,9 @@ static void __init ek_board_init(void)
 
 MACHINE_START(QIL_A9260, "CALAO QIL_A9260")
 	/* Maintainer: calao-systems */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-rm9200dk.c b/arch/arm/mach-at91/board-rm9200dk.c
index 4c1047c8200d..aef9627710b0 100644
--- a/arch/arm/mach-at91/board-rm9200dk.c
+++ b/arch/arm/mach-at91/board-rm9200dk.c
@@ -45,10 +45,10 @@
 #include "generic.h"
 
 
-static void __init dk_map_io(void)
+static void __init dk_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
-	at91rm9200_initialize(18432000, AT91RM9200_BGA);
+	at91rm9200_initialize(18432000);
 
 	/* Setup the LEDs */
 	at91_init_leds(AT91_PIN_PB2, AT91_PIN_PB2);
@@ -227,9 +227,9 @@ static void __init dk_board_init(void)
 
 MACHINE_START(AT91RM9200DK, "Atmel AT91RM9200-DK")
 	/* Maintainer: SAN People/Atmel */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = dk_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = dk_init_early,
 	.init_irq = dk_init_irq,
 	.init_machine = dk_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-rm9200ek.c b/arch/arm/mach-at91/board-rm9200ek.c
index 9df1be8818c0..015a02183080 100644
--- a/arch/arm/mach-at91/board-rm9200ek.c
+++ b/arch/arm/mach-at91/board-rm9200ek.c
@@ -45,10 +45,10 @@
 #include "generic.h"
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
-	at91rm9200_initialize(18432000, AT91RM9200_BGA);
+	at91rm9200_initialize(18432000);
 
 	/* Setup the LEDs */
 	at91_init_leds(AT91_PIN_PB1, AT91_PIN_PB2);
@@ -193,9 +193,9 @@ static void __init ek_board_init(void)
 
 MACHINE_START(AT91RM9200EK, "Atmel AT91RM9200-EK")
 	/* Maintainer: SAN People/Atmel */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = ek_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9-l9260.c b/arch/arm/mach-at91/board-sam9-l9260.c
index 25a26beaa728..aaf1bf0989b3 100644
--- a/arch/arm/mach-at91/board-sam9-l9260.c
+++ b/arch/arm/mach-at91/board-sam9-l9260.c
@@ -44,7 +44,7 @@
 #include "generic.h"
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	at91sam9260_initialize(18432000);
@@ -212,9 +212,9 @@ static void __init ek_board_init(void)
 
 MACHINE_START(SAM9_L9260, "Olimex SAM9-L9260")
 	/* Maintainer: Olimex */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index de1816e0e1d9..d600dc123227 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -44,12 +44,13 @@
 #include <mach/gpio.h>
 #include <mach/at91sam9_smc.h>
 #include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
 
 #include "sam9_smc.h"
 #include "generic.h"
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	at91sam9260_initialize(18432000);
@@ -191,11 +192,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
 	.rdy_pin = AT91_PIN_PC13,
 	.enable_pin = AT91_PIN_PC14,
 	.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
-	.bus_width_16 = 1,
-#else
-	.bus_width_16 = 0,
-#endif
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -218,6 +214,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
+	ek_nand_data.bus_width_16 = !board_have_nand_8bit();
 	/* setup bus-width (8 or 16) */
 	if (ek_nand_data.bus_width_16)
 		ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -356,9 +353,9 @@ static void __init ek_board_init(void)
 
 MACHINE_START(AT91SAM9260EK, "Atmel AT91SAM9260-EK")
 	/* Maintainer: Atmel */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index 14acc901e24c..f897f84d43dc 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -48,12 +48,13 @@
 #include <mach/gpio.h>
 #include <mach/at91sam9_smc.h>
 #include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
 
 #include "sam9_smc.h"
 #include "generic.h"
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	at91sam9261_initialize(18432000);
@@ -197,11 +198,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
 	.rdy_pin = AT91_PIN_PC15,
 	.enable_pin = AT91_PIN_PC14,
 	.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
-	.bus_width_16 = 1,
-#else
-	.bus_width_16 = 0,
-#endif
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -224,6 +220,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
+	ek_nand_data.bus_width_16 = !board_have_nand_8bit();
 	/* setup bus-width (8 or 16) */
 	if (ek_nand_data.bus_width_16)
 		ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -623,9 +620,9 @@ MACHINE_START(AT91SAM9261EK, "Atmel AT91SAM9261-EK")
 MACHINE_START(AT91SAM9G10EK, "Atmel AT91SAM9G10-EK")
 #endif
 	/* Maintainer: Atmel */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9261_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index bfe490df58be..605b26f40a4c 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -47,12 +47,13 @@
 #include <mach/gpio.h>
 #include <mach/at91sam9_smc.h>
 #include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
 
 #include "sam9_smc.h"
 #include "generic.h"
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 16.367 MHz crystal */
 	at91sam9263_initialize(16367660);
@@ -198,11 +199,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
 	.rdy_pin = AT91_PIN_PA22,
 	.enable_pin = AT91_PIN_PD15,
 	.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
-	.bus_width_16 = 1,
-#else
-	.bus_width_16 = 0,
-#endif
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -225,6 +221,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
+	ek_nand_data.bus_width_16 = !board_have_nand_8bit();
 	/* setup bus-width (8 or 16) */
 	if (ek_nand_data.bus_width_16)
 		ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -454,9 +451,9 @@ static void __init ek_board_init(void)
 
 MACHINE_START(AT91SAM9263EK, "Atmel AT91SAM9263-EK")
 	/* Maintainer: Atmel */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9263_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index ca8198b3c168..7624cf0d006b 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -43,6 +43,7 @@
 #include <mach/board.h>
 #include <mach/gpio.h>
 #include <mach/at91sam9_smc.h>
+#include <mach/system_rev.h>
 
 #include "sam9_smc.h"
 #include "generic.h"
@@ -60,7 +61,7 @@ static int inline ek_have_2mmc(void)
 }
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	at91sam9260_initialize(18432000);
@@ -175,11 +176,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
 	.rdy_pin = AT91_PIN_PC13,
 	.enable_pin = AT91_PIN_PC14,
 	.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
-	.bus_width_16 = 1,
-#else
-	.bus_width_16 = 0,
-#endif
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -202,6 +198,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
+	ek_nand_data.bus_width_16 = !board_have_nand_8bit();
 	/* setup bus-width (8 or 16) */
 	if (ek_nand_data.bus_width_16)
 		ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -406,18 +403,18 @@ static void __init ek_board_init(void)
 
 MACHINE_START(AT91SAM9G20EK, "Atmel AT91SAM9G20-EK")
 	/* Maintainer: Atmel */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
 
 MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod")
 	/* Maintainer: Atmel */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 6c999dbd2bcf..063c95d0e8f0 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -41,12 +41,13 @@
 #include <mach/gpio.h>
 #include <mach/at91sam9_smc.h>
 #include <mach/at91_shdwc.h>
+#include <mach/system_rev.h>
 
 #include "sam9_smc.h"
 #include "generic.h"
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 12.000 MHz crystal */
 	at91sam9g45_initialize(12000000);
@@ -155,11 +156,6 @@ static struct atmel_nand_data __initdata ek_nand_data = {
 	.rdy_pin = AT91_PIN_PC8,
 	.enable_pin = AT91_PIN_PC14,
 	.partition_info = nand_partitions,
-#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
-	.bus_width_16 = 1,
-#else
-	.bus_width_16 = 0,
-#endif
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
@@ -182,6 +178,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
+	ek_nand_data.bus_width_16 = !board_have_nand_8bit();
 	/* setup bus-width (8 or 16) */
 	if (ek_nand_data.bus_width_16)
 		ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
@@ -424,9 +421,9 @@ static void __init ek_board_init(void)
 
 MACHINE_START(AT91SAM9M10G45EK, "Atmel AT91SAM9M10G45-EK")
 	/* Maintainer: Atmel */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9g45_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 3bf3408e94c1..effb399a80a6 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -38,7 +38,7 @@
 #include "generic.h"
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 12.000 MHz crystal */
 	at91sam9rl_initialize(12000000);
@@ -329,9 +329,9 @@ static void __init ek_board_init(void)
 
 MACHINE_START(AT91SAM9RLEK, "Atmel AT91SAM9RL-EK")
 	/* Maintainer: Atmel */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9rl_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-snapper9260.c b/arch/arm/mach-at91/board-snapper9260.c
index 17f7d9b32142..3eb0a1153cc8 100644
--- a/arch/arm/mach-at91/board-snapper9260.c
+++ b/arch/arm/mach-at91/board-snapper9260.c
@@ -40,7 +40,7 @@
 
 #define SNAPPER9260_IO_EXP_GPIO(x) (NR_BUILTIN_GPIO + (x))
 
-static void __init snapper9260_map_io(void)
+static void __init snapper9260_init_early(void)
 {
 	at91sam9260_initialize(18432000);
 
@@ -178,9 +178,9 @@ static void __init snapper9260_board_init(void)
 }
 
 MACHINE_START(SNAPPER_9260, "Bluewater Systems Snapper 9260/9G20 module")
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = snapper9260_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = snapper9260_init_early,
 	.init_irq = snapper9260_init_irq,
 	.init_machine = snapper9260_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
index f8902b118960..5e5c85688f5f 100644
--- a/arch/arm/mach-at91/board-stamp9g20.c
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -32,7 +32,7 @@
 #include "generic.h"
 
 
-void __init stamp9g20_map_io(void)
+void __init stamp9g20_init_early(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	at91sam9260_initialize(18432000);
@@ -44,9 +44,9 @@ void __init stamp9g20_map_io(void)
 	at91_set_serial_console(0);
 }
 
-static void __init stamp9g20evb_map_io(void)
+static void __init stamp9g20evb_init_early(void)
 {
-	stamp9g20_map_io();
+	stamp9g20_init_early();
 
 	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
 	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
@@ -54,9 +54,9 @@ static void __init stamp9g20evb_map_io(void)
 			| ATMEL_UART_DCD | ATMEL_UART_RI);
 }
 
-static void __init portuxg20_map_io(void)
+static void __init portuxg20_init_early(void)
 {
-	stamp9g20_map_io();
+	stamp9g20_init_early();
 
 	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
 	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
@@ -298,18 +298,18 @@ static void __init stamp9g20evb_board_init(void)
 
 MACHINE_START(PORTUXG20, "taskit PortuxG20")
 	/* Maintainer: taskit GmbH */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = portuxg20_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = portuxg20_init_early,
 	.init_irq = init_irq,
 	.init_machine = portuxg20_board_init,
 MACHINE_END
 
 MACHINE_START(STAMP9G20, "taskit Stamp9G20")
 	/* Maintainer: taskit GmbH */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = stamp9g20evb_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = stamp9g20evb_init_early,
 	.init_irq = init_irq,
 	.init_machine = stamp9g20evb_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-usb-a9260.c b/arch/arm/mach-at91/board-usb-a9260.c
index 07784baeae84..0e784e6fedec 100644
--- a/arch/arm/mach-at91/board-usb-a9260.c
+++ b/arch/arm/mach-at91/board-usb-a9260.c
@@ -48,7 +48,7 @@
 #include "generic.h"
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 12.000 MHz crystal */
 	at91sam9260_initialize(12000000);
@@ -228,9 +228,9 @@ static void __init ek_board_init(void)
 
 MACHINE_START(USB_A9260, "CALAO USB_A9260")
 	/* Maintainer: calao-systems */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9260_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-usb-a9263.c b/arch/arm/mach-at91/board-usb-a9263.c
index b614508931fd..cf626dd14b2c 100644
--- a/arch/arm/mach-at91/board-usb-a9263.c
+++ b/arch/arm/mach-at91/board-usb-a9263.c
@@ -47,7 +47,7 @@
 #include "generic.h"
 
 
-static void __init ek_map_io(void)
+static void __init ek_init_early(void)
 {
 	/* Initialize processor: 12.00 MHz crystal */
 	at91sam9263_initialize(12000000);
@@ -244,9 +244,9 @@ static void __init ek_board_init(void)
 
 MACHINE_START(USB_A9263, "CALAO USB_A9263")
 	/* Maintainer: calao-systems */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91sam926x_timer,
-	.map_io = ek_map_io,
+	.map_io = at91sam9263_map_io,
+	.init_early = ek_init_early,
 	.init_irq = ek_init_irq,
 	.init_machine = ek_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
index e0f0080eb639..c208cc334d7d 100644
--- a/arch/arm/mach-at91/board-yl-9200.c
+++ b/arch/arm/mach-at91/board-yl-9200.c
@@ -45,14 +45,18 @@
 #include <mach/board.h>
 #include <mach/gpio.h>
 #include <mach/at91rm9200_mc.h>
+#include <mach/cpu.h>
 
 #include "generic.h"
 
 
-static void __init yl9200_map_io(void)
+static void __init yl9200_init_early(void)
 {
+	/* Set cpu type: PQFP */
+	at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
+
 	/* Initialize processor: 18.432 MHz crystal */
-	at91rm9200_initialize(18432000, AT91RM9200_PQFP);
+	at91rm9200_initialize(18432000);
 
 	/* Setup the LEDs D2=PB17 (timer), D3=PB16 (cpu) */
 	at91_init_leds(AT91_PIN_PB16, AT91_PIN_PB17);
@@ -594,9 +598,9 @@ static void __init yl9200_board_init(void)
 
 MACHINE_START(YL9200, "uCdragon YL-9200")
 	/* Maintainer: S.Birtles */
-	.boot_params = AT91_SDRAM_BASE + 0x100,
 	.timer = &at91rm9200_timer,
-	.map_io = yl9200_map_io,
+	.map_io = at91rm9200_map_io,
+	.init_early = yl9200_init_early,
 	.init_irq = yl9200_init_irq,
 	.init_machine = yl9200_board_init,
 MACHINE_END
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index 9113da6845f1..61873f3aa92d 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -163,7 +163,7 @@ static struct clk udpck = {
163 .parent = &pllb, 163 .parent = &pllb,
164 .mode = pmc_sys_mode, 164 .mode = pmc_sys_mode,
165}; 165};
166static struct clk utmi_clk = { 166struct clk utmi_clk = {
167 .name = "utmi_clk", 167 .name = "utmi_clk",
168 .parent = &main_clk, 168 .parent = &main_clk,
169 .pmc_mask = AT91_PMC_UPLLEN, /* in CKGR_UCKR */ 169 .pmc_mask = AT91_PMC_UPLLEN, /* in CKGR_UCKR */
@@ -182,7 +182,7 @@ static struct clk uhpck = {
182 * memory, interfaces to on-chip peripherals, the AIC, and sometimes more 182 * memory, interfaces to on-chip peripherals, the AIC, and sometimes more
183 * (e.g baud rate generation). It's sourced from one of the primary clocks. 183 * (e.g baud rate generation). It's sourced from one of the primary clocks.
184 */ 184 */
185static struct clk mck = { 185struct clk mck = {
186 .name = "mck", 186 .name = "mck",
187 .pmc_mask = AT91_PMC_MCKRDY, /* in PMC_SR */ 187 .pmc_mask = AT91_PMC_MCKRDY, /* in PMC_SR */
188}; 188};
@@ -215,43 +215,6 @@ static struct clk __init *at91_css_to_clk(unsigned long css)
215 return NULL; 215 return NULL;
216} 216}
217 217
218/*
219 * Associate a particular clock with a function (eg, "uart") and device.
220 * The drivers can then request the same 'function' with several different
221 * devices and not care about which clock name to use.
222 */
223void __init at91_clock_associate(const char *id, struct device *dev, const char *func)
224{
225 struct clk *clk = clk_get(NULL, id);
226
227 if (!dev || !clk || !IS_ERR(clk_get(dev, func)))
228 return;
229
230 clk->function = func;
231 clk->dev = dev;
232}
233
234/* clocks cannot be de-registered no refcounting necessary */
235struct clk *clk_get(struct device *dev, const char *id)
236{
237 struct clk *clk;
238
239 list_for_each_entry(clk, &clocks, node) {
240 if (strcmp(id, clk->name) == 0)
241 return clk;
242 if (clk->function && (dev == clk->dev) && strcmp(id, clk->function) == 0)
243 return clk;
244 }
245
246 return ERR_PTR(-ENOENT);
247}
248EXPORT_SYMBOL(clk_get);
249
250void clk_put(struct clk *clk)
251{
252}
253EXPORT_SYMBOL(clk_put);
254
255static void __clk_enable(struct clk *clk) 218static void __clk_enable(struct clk *clk)
256{ 219{
257 if (clk->parent) 220 if (clk->parent)
@@ -498,32 +461,38 @@ postcore_initcall(at91_clk_debugfs_init);
498/*------------------------------------------------------------------------*/ 461/*------------------------------------------------------------------------*/
499 462
500/* Register a new clock */ 463/* Register a new clock */
464static void __init at91_clk_add(struct clk *clk)
465{
466 list_add_tail(&clk->node, &clocks);
467
468 clk->cl.con_id = clk->name;
469 clk->cl.clk = clk;
470 clkdev_add(&clk->cl);
471}
472
501int __init clk_register(struct clk *clk) 473int __init clk_register(struct clk *clk)
502{ 474{
503 if (clk_is_peripheral(clk)) { 475 if (clk_is_peripheral(clk)) {
504 if (!clk->parent) 476 if (!clk->parent)
505 clk->parent = &mck; 477 clk->parent = &mck;
506 clk->mode = pmc_periph_mode; 478 clk->mode = pmc_periph_mode;
507 list_add_tail(&clk->node, &clocks);
508 } 479 }
509 else if (clk_is_sys(clk)) { 480 else if (clk_is_sys(clk)) {
510 clk->parent = &mck; 481 clk->parent = &mck;
511 clk->mode = pmc_sys_mode; 482 clk->mode = pmc_sys_mode;
512
513 list_add_tail(&clk->node, &clocks);
514 } 483 }
515#ifdef CONFIG_AT91_PROGRAMMABLE_CLOCKS 484#ifdef CONFIG_AT91_PROGRAMMABLE_CLOCKS
516 else if (clk_is_programmable(clk)) { 485 else if (clk_is_programmable(clk)) {
517 clk->mode = pmc_sys_mode; 486 clk->mode = pmc_sys_mode;
518 init_programmable_clock(clk); 487 init_programmable_clock(clk);
519 list_add_tail(&clk->node, &clocks);
520 } 488 }
521#endif 489#endif
522 490
491 at91_clk_add(clk);
492
523 return 0; 493 return 0;
524} 494}
525 495
526
527/*------------------------------------------------------------------------*/ 496/*------------------------------------------------------------------------*/
528 497
529static u32 __init at91_pll_rate(struct clk *pll, u32 freq, u32 reg) 498static u32 __init at91_pll_rate(struct clk *pll, u32 freq, u32 reg)
@@ -630,7 +599,7 @@ static void __init at91_pllb_usbfs_clock_init(unsigned long main_clock)
630 at91_sys_write(AT91_PMC_SCER, AT91RM9200_PMC_MCKUDP); 599 at91_sys_write(AT91_PMC_SCER, AT91RM9200_PMC_MCKUDP);
631 } else if (cpu_is_at91sam9260() || cpu_is_at91sam9261() || 600 } else if (cpu_is_at91sam9260() || cpu_is_at91sam9261() ||
632 cpu_is_at91sam9263() || cpu_is_at91sam9g20() || 601 cpu_is_at91sam9263() || cpu_is_at91sam9g20() ||
633 cpu_is_at91sam9g10() || cpu_is_at572d940hf()) { 602 cpu_is_at91sam9g10()) {
634 uhpck.pmc_mask = AT91SAM926x_PMC_UHP; 603 uhpck.pmc_mask = AT91SAM926x_PMC_UHP;
635 udpck.pmc_mask = AT91SAM926x_PMC_UDP; 604 udpck.pmc_mask = AT91SAM926x_PMC_UDP;
636 } else if (cpu_is_at91cap9()) { 605 } else if (cpu_is_at91cap9()) {
@@ -754,19 +723,19 @@ int __init at91_clock_init(unsigned long main_clock)
754 723
755 /* Register the PMC's standard clocks */ 724 /* Register the PMC's standard clocks */
756 for (i = 0; i < ARRAY_SIZE(standard_pmc_clocks); i++) 725 for (i = 0; i < ARRAY_SIZE(standard_pmc_clocks); i++)
757 list_add_tail(&standard_pmc_clocks[i]->node, &clocks); 726 at91_clk_add(standard_pmc_clocks[i]);
758 727
759 if (cpu_has_pllb()) 728 if (cpu_has_pllb())
760 list_add_tail(&pllb.node, &clocks); 729 at91_clk_add(&pllb);
761 730
762 if (cpu_has_uhp()) 731 if (cpu_has_uhp())
763 list_add_tail(&uhpck.node, &clocks); 732 at91_clk_add(&uhpck);
764 733
765 if (cpu_has_udpfs()) 734 if (cpu_has_udpfs())
766 list_add_tail(&udpck.node, &clocks); 735 at91_clk_add(&udpck);
767 736
768 if (cpu_has_utmi()) 737 if (cpu_has_utmi())
769 list_add_tail(&utmi_clk.node, &clocks); 738 at91_clk_add(&utmi_clk);
770 739
771 /* MCK and CPU clock are "always on" */ 740 /* MCK and CPU clock are "always on" */
772 clk_enable(&mck); 741 clk_enable(&mck);
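
With the private clk_get()/clk_put() and at91_clock_associate() removed, lookups go through the generic clkdev layer: at91_clk_add() fills in the clk_lookup embedded in each struct clk, using the clock's name as con_id, and registers it with clkdev_add(). A minimal consumer-side sketch, assuming a device that wants the master clock; the function name and device pointer are illustrative:

/* Sketch: consumer side after the clkdev conversion. */
#include <linux/clk.h>
#include <linux/err.h>

static int example_enable_mck(struct device *dev)
{
	struct clk *clk;

	/* matched against the con_id ("mck") set up by at91_clk_add() */
	clk = clk_get(dev, "mck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	return 0;
}
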
diff --git a/arch/arm/mach-at91/clock.h b/arch/arm/mach-at91/clock.h
index 6cf4b78e175d..c2e63e47dcbe 100644
--- a/arch/arm/mach-at91/clock.h
+++ b/arch/arm/mach-at91/clock.h
@@ -6,6 +6,8 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9#include <linux/clkdev.h>
10
9#define CLK_TYPE_PRIMARY 0x1 11#define CLK_TYPE_PRIMARY 0x1
10#define CLK_TYPE_PLL 0x2 12#define CLK_TYPE_PLL 0x2
11#define CLK_TYPE_PROGRAMMABLE 0x4 13#define CLK_TYPE_PROGRAMMABLE 0x4
@@ -16,8 +18,7 @@
16struct clk { 18struct clk {
17 struct list_head node; 19 struct list_head node;
18 const char *name; /* unique clock name */ 20 const char *name; /* unique clock name */
19 const char *function; /* function of the clock */ 21 struct clk_lookup cl;
20 struct device *dev; /* device associated with function */
21 unsigned long rate_hz; 22 unsigned long rate_hz;
22 struct clk *parent; 23 struct clk *parent;
23 u32 pmc_mask; 24 u32 pmc_mask;
@@ -29,3 +30,18 @@ struct clk {
29 30
30 31
31extern int __init clk_register(struct clk *clk); 32extern int __init clk_register(struct clk *clk);
33extern struct clk mck;
34extern struct clk utmi_clk;
35
36#define CLKDEV_CON_ID(_id, _clk) \
37 { \
38 .con_id = _id, \
39 .clk = _clk, \
40 }
41
42#define CLKDEV_CON_DEV_ID(_con_id, _dev_id, _clk) \
43 { \
44 .con_id = _con_id, \
45 .dev_id = _dev_id, \
46 .clk = _clk, \
47 }
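
The new CLKDEV_CON_ID and CLKDEV_CON_DEV_ID helpers are initializers for struct clk_lookup, so a per-SoC file can declare a static table binding each clock either to a bare connection id or to a (con_id, dev_id) pair. A hedged sketch of such a table; pioA_clk and usart1_clk stand in for real struct clk objects declared elsewhere, and the device name is only an example:

/* Sketch: a per-SoC lookup table built with the new helpers. */
#include <linux/clkdev.h>
#include "clock.h"

static struct clk_lookup periph_clocks_lookups[] = {
	/* reachable by connection id alone */
	CLKDEV_CON_ID("pioA_clk", &pioA_clk),
	/* bound to one device instance */
	CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart1_clk),
};

static void __init example_register_lookups(void)
{
	clkdev_add_table(periph_clocks_lookups,
			 ARRAY_SIZE(periph_clocks_lookups));
}
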
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 0c66deb2db39..8ff3418f3430 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -8,8 +8,21 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/clkdev.h>
12
13 /* Map io */
14extern void __init at91rm9200_map_io(void);
15extern void __init at91sam9260_map_io(void);
16extern void __init at91sam9261_map_io(void);
17extern void __init at91sam9263_map_io(void);
18extern void __init at91sam9rl_map_io(void);
19extern void __init at91sam9g45_map_io(void);
20extern void __init at91x40_map_io(void);
21extern void __init at91cap9_map_io(void);
22
11 /* Processors */ 23 /* Processors */
12extern void __init at91rm9200_initialize(unsigned long main_clock, unsigned short banks); 24extern void __init at91rm9200_set_type(int type);
25extern void __init at91rm9200_initialize(unsigned long main_clock);
13extern void __init at91sam9260_initialize(unsigned long main_clock); 26extern void __init at91sam9260_initialize(unsigned long main_clock);
14extern void __init at91sam9261_initialize(unsigned long main_clock); 27extern void __init at91sam9261_initialize(unsigned long main_clock);
15extern void __init at91sam9263_initialize(unsigned long main_clock); 28extern void __init at91sam9263_initialize(unsigned long main_clock);
@@ -17,7 +30,6 @@ extern void __init at91sam9rl_initialize(unsigned long main_clock);
17extern void __init at91sam9g45_initialize(unsigned long main_clock); 30extern void __init at91sam9g45_initialize(unsigned long main_clock);
18extern void __init at91x40_initialize(unsigned long main_clock); 31extern void __init at91x40_initialize(unsigned long main_clock);
19extern void __init at91cap9_initialize(unsigned long main_clock); 32extern void __init at91cap9_initialize(unsigned long main_clock);
20extern void __init at572d940hf_initialize(unsigned long main_clock);
21 33
22 /* Interrupts */ 34 /* Interrupts */
23extern void __init at91rm9200_init_interrupts(unsigned int priority[]); 35extern void __init at91rm9200_init_interrupts(unsigned int priority[]);
@@ -28,7 +40,6 @@ extern void __init at91sam9rl_init_interrupts(unsigned int priority[]);
28extern void __init at91sam9g45_init_interrupts(unsigned int priority[]); 40extern void __init at91sam9g45_init_interrupts(unsigned int priority[]);
29extern void __init at91x40_init_interrupts(unsigned int priority[]); 41extern void __init at91x40_init_interrupts(unsigned int priority[]);
30extern void __init at91cap9_init_interrupts(unsigned int priority[]); 42extern void __init at91cap9_init_interrupts(unsigned int priority[]);
31extern void __init at572d940hf_init_interrupts(unsigned int priority[]);
32extern void __init at91_aic_init(unsigned int priority[]); 43extern void __init at91_aic_init(unsigned int priority[]);
33 44
34 /* Timer */ 45 /* Timer */
@@ -39,8 +50,19 @@ extern struct sys_timer at91x40_timer;
39 50
40 /* Clocks */ 51 /* Clocks */
41extern int __init at91_clock_init(unsigned long main_clock); 52extern int __init at91_clock_init(unsigned long main_clock);
53/*
54 * Functions to specify the clock of the default console. As we do not
55 * use the device/driver bus, the dev_name is not initialized, so we
56 * need to link the clock to the specific con_id "usart" only.
57 */
58extern void __init at91rm9200_set_console_clock(int id);
59extern void __init at91sam9260_set_console_clock(int id);
60extern void __init at91sam9261_set_console_clock(int id);
61extern void __init at91sam9263_set_console_clock(int id);
62extern void __init at91sam9rl_set_console_clock(int id);
63extern void __init at91sam9g45_set_console_clock(int id);
64extern void __init at91cap9_set_console_clock(int id);
42struct device; 65struct device;
43extern void __init at91_clock_associate(const char *id, struct device *dev, const char *func);
44 66
45 /* Power Management */ 67 /* Power Management */
46extern void at91_irq_suspend(void); 68extern void at91_irq_suspend(void);
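
The new at91*_set_console_clock() helpers exist because, as the comment above notes, the early console cannot be matched by dev_name, so its clock has to be published under the bare con_id "usart". A sketch of one plausible shape for such a helper on the SAM9260, assuming per-SoC usart clocks named usart0_clk..usart2_clk (those names are placeholders, not from this hunk):

/* Sketch: binding the console to a USART clock by con_id "usart". */
#include <linux/clkdev.h>
#include "clock.h"

static struct clk_lookup console_clock_lookup;

static struct clk *usart_clks[] = {
	&mck,		/* id 0: DBGU is clocked from the master clock */
	&usart0_clk,	/* placeholders for the real per-SoC clocks */
	&usart1_clk,
	&usart2_clk,
};

void __init at91sam9260_set_console_clock(int id)
{
	if (id >= ARRAY_SIZE(usart_clks))
		return;

	console_clock_lookup.con_id = "usart";
	console_clock_lookup.clk = usart_clks[id];
	clkdev_add(&console_clock_lookup);
}
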
diff --git a/arch/arm/mach-at91/include/mach/at572d940hf.h b/arch/arm/mach-at91/include/mach/at572d940hf.h
deleted file mode 100644
index be510cfc56be..000000000000
--- a/arch/arm/mach-at91/include/mach/at572d940hf.h
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * include/mach/at572d940hf.h
3 *
4 * Antonio R. Costa <costa.antonior@gmail.com>
5 * Copyright (C) 2008 Atmel
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef AT572D940HF_H
24#define AT572D940HF_H
25
26/*
27 * Peripheral identifiers/interrupts.
28 */
29#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
30#define AT91_ID_SYS 1 /* System Peripherals */
31#define AT572D940HF_ID_PIOA 2 /* Parallel IO Controller A */
32#define AT572D940HF_ID_PIOB 3 /* Parallel IO Controller B */
33#define AT572D940HF_ID_PIOC 4 /* Parallel IO Controller C */
34#define AT572D940HF_ID_EMAC 5 /* MACB ethernet controller */
35#define AT572D940HF_ID_US0 6 /* USART 0 */
36#define AT572D940HF_ID_US1 7 /* USART 1 */
37#define AT572D940HF_ID_US2 8 /* USART 2 */
38#define AT572D940HF_ID_MCI 9 /* Multimedia Card Interface */
39#define AT572D940HF_ID_UDP 10 /* USB Device Port */
40#define AT572D940HF_ID_TWI0 11 /* Two-Wire Interface 0 */
41#define AT572D940HF_ID_SPI0 12 /* Serial Peripheral Interface 0 */
42#define AT572D940HF_ID_SPI1 13 /* Serial Peripheral Interface 1 */
43#define AT572D940HF_ID_SSC0 14 /* Serial Synchronous Controller 0 */
44#define AT572D940HF_ID_SSC1 15 /* Serial Synchronous Controller 1 */
45#define AT572D940HF_ID_SSC2 16 /* Serial Synchronous Controller 2 */
46#define AT572D940HF_ID_TC0 17 /* Timer Counter 0 */
47#define AT572D940HF_ID_TC1 18 /* Timer Counter 1 */
48#define AT572D940HF_ID_TC2 19 /* Timer Counter 2 */
49#define AT572D940HF_ID_UHP 20 /* USB Host port */
50#define AT572D940HF_ID_SSC3 21 /* Serial Synchronous Controller 3 */
51#define AT572D940HF_ID_TWI1 22 /* Two-Wire Interface 1 */
52#define AT572D940HF_ID_CAN0 23 /* CAN Controller 0 */
53#define AT572D940HF_ID_CAN1 24 /* CAN Controller 1 */
54#define AT572D940HF_ID_MHALT 25 /* mAgicV HALT line */
55#define AT572D940HF_ID_MSIRQ0 26 /* mAgicV SIRQ0 line */
56#define AT572D940HF_ID_MEXC 27 /* mAgicV exception line */
57#define AT572D940HF_ID_MEDMA 28 /* mAgicV end of DMA line */
58#define AT572D940HF_ID_IRQ0 29 /* External Interrupt Source (IRQ0) */
59#define AT572D940HF_ID_IRQ1 30 /* External Interrupt Source (IRQ1) */
60#define AT572D940HF_ID_IRQ2 31 /* External Interrupt Source (IRQ2) */
61
62
63/*
64 * User Peripheral physical base addresses.
65 */
66#define AT572D940HF_BASE_TCB 0xfffa0000
67#define AT572D940HF_BASE_TC0 0xfffa0000
68#define AT572D940HF_BASE_TC1 0xfffa0040
69#define AT572D940HF_BASE_TC2 0xfffa0080
70#define AT572D940HF_BASE_UDP 0xfffa4000
71#define AT572D940HF_BASE_MCI 0xfffa8000
72#define AT572D940HF_BASE_TWI0 0xfffac000
73#define AT572D940HF_BASE_US0 0xfffb0000
74#define AT572D940HF_BASE_US1 0xfffb4000
75#define AT572D940HF_BASE_US2 0xfffb8000
76#define AT572D940HF_BASE_SSC0 0xfffbc000
77#define AT572D940HF_BASE_SSC1 0xfffc0000
78#define AT572D940HF_BASE_SSC2 0xfffc4000
79#define AT572D940HF_BASE_SPI0 0xfffc8000
80#define AT572D940HF_BASE_SPI1 0xfffcc000
81#define AT572D940HF_BASE_SSC3 0xfffd0000
82#define AT572D940HF_BASE_TWI1 0xfffd4000
83#define AT572D940HF_BASE_EMAC 0xfffd8000
84#define AT572D940HF_BASE_CAN0 0xfffdc000
85#define AT572D940HF_BASE_CAN1 0xfffe0000
86#define AT91_BASE_SYS 0xffffea00
87
88
89/*
90 * System Peripherals (offset from AT91_BASE_SYS)
91 */
92#define AT91_SDRAMC0 (0xffffea00 - AT91_BASE_SYS)
93#define AT91_SMC (0xffffec00 - AT91_BASE_SYS)
94#define AT91_MATRIX (0xffffee00 - AT91_BASE_SYS)
95#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
96#define AT91_DBGU (0xfffff200 - AT91_BASE_SYS)
97#define AT91_PIOA (0xfffff400 - AT91_BASE_SYS)
98#define AT91_PIOB (0xfffff600 - AT91_BASE_SYS)
99#define AT91_PIOC (0xfffff800 - AT91_BASE_SYS)
100#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS)
101#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS)
102#define AT91_RTT (0xfffffd20 - AT91_BASE_SYS)
103#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS)
104#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS)
105
106#define AT91_USART0 AT572D940HF_ID_US0
107#define AT91_USART1 AT572D940HF_ID_US1
108#define AT91_USART2 AT572D940HF_ID_US2
109
110
111/*
112 * Internal Memory.
113 */
114#define AT572D940HF_SRAM_BASE 0x00300000 /* Internal SRAM base address */
115#define AT572D940HF_SRAM_SIZE (48 * SZ_1K) /* Internal SRAM size (48Kb) */
116
117#define AT572D940HF_ROM_BASE 0x00400000 /* Internal ROM base address */
118#define AT572D940HF_ROM_SIZE SZ_32K /* Internal ROM size (32Kb) */
119
120#define AT572D940HF_UHP_BASE 0x00500000 /* USB Host controller */
121
122
123#endif
diff --git a/arch/arm/mach-at91/include/mach/at572d940hf_matrix.h b/arch/arm/mach-at91/include/mach/at572d940hf_matrix.h
deleted file mode 100644
index b6751df09488..000000000000
--- a/arch/arm/mach-at91/include/mach/at572d940hf_matrix.h
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * include/mach//at572d940hf_matrix.h
3 *
4 * Antonio R. Costa <costa.antonior@gmail.com>
5 * Copyright (C) 2008 Atmel
6 *
7 * Copyright (C) 2005 SAN People
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef AT572D940HF_MATRIX_H
25#define AT572D940HF_MATRIX_H
26
27#define AT91_MATRIX_MCFG0 (AT91_MATRIX + 0x00) /* Master Configuration Register 0 */
28#define AT91_MATRIX_MCFG1 (AT91_MATRIX + 0x04) /* Master Configuration Register 1 */
29#define AT91_MATRIX_MCFG2 (AT91_MATRIX + 0x08) /* Master Configuration Register 2 */
30#define AT91_MATRIX_MCFG3 (AT91_MATRIX + 0x0C) /* Master Configuration Register 3 */
31#define AT91_MATRIX_MCFG4 (AT91_MATRIX + 0x10) /* Master Configuration Register 4 */
32#define AT91_MATRIX_MCFG5 (AT91_MATRIX + 0x14) /* Master Configuration Register 5 */
33
34#define AT91_MATRIX_ULBT (7 << 0) /* Undefined Length Burst Type */
35#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
36#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
37#define AT91_MATRIX_ULBT_FOUR (2 << 0)
38#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
39#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
40
41#define AT91_MATRIX_SCFG0 (AT91_MATRIX + 0x40) /* Slave Configuration Register 0 */
42#define AT91_MATRIX_SCFG1 (AT91_MATRIX + 0x44) /* Slave Configuration Register 1 */
43#define AT91_MATRIX_SCFG2 (AT91_MATRIX + 0x48) /* Slave Configuration Register 2 */
44#define AT91_MATRIX_SCFG3 (AT91_MATRIX + 0x4C) /* Slave Configuration Register 3 */
45#define AT91_MATRIX_SCFG4 (AT91_MATRIX + 0x50) /* Slave Configuration Register 4 */
46#define AT91_MATRIX_SLOT_CYCLE (0xff << 0) /* Maximum Number of Allowed Cycles for a Burst */
47#define AT91_MATRIX_DEFMSTR_TYPE (3 << 16) /* Default Master Type */
48#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
49#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
50#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
51#define AT91_MATRIX_FIXED_DEFMSTR (0x7 << 18) /* Fixed Index of Default Master */
52#define AT91_MATRIX_ARBT (3 << 24) /* Arbitration Type */
53#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24)
54#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24)
55
56#define AT91_MATRIX_PRAS0 (AT91_MATRIX + 0x80) /* Priority Register A for Slave 0 */
57#define AT91_MATRIX_PRAS1 (AT91_MATRIX + 0x88) /* Priority Register A for Slave 1 */
58#define AT91_MATRIX_PRAS2 (AT91_MATRIX + 0x90) /* Priority Register A for Slave 2 */
59#define AT91_MATRIX_PRAS3 (AT91_MATRIX + 0x98) /* Priority Register A for Slave 3 */
60#define AT91_MATRIX_PRAS4 (AT91_MATRIX + 0xA0) /* Priority Register A for Slave 4 */
61
62#define AT91_MATRIX_M0PR (3 << 0) /* Master 0 Priority */
63#define AT91_MATRIX_M1PR (3 << 4) /* Master 1 Priority */
64#define AT91_MATRIX_M2PR (3 << 8) /* Master 2 Priority */
65#define AT91_MATRIX_M3PR (3 << 12) /* Master 3 Priority */
66#define AT91_MATRIX_M4PR (3 << 16) /* Master 4 Priority */
67#define AT91_MATRIX_M5PR (3 << 20) /* Master 5 Priority */
68#define AT91_MATRIX_M6PR (3 << 24) /* Master 6 Priority */
69
70#define AT91_MATRIX_MRCR (AT91_MATRIX + 0x100) /* Master Remap Control Register */
71#define AT91_MATRIX_RCB0 (1 << 0) /* Remap Command for AHB Master 0 (ARM926EJ-S Instruction Master) */
72#define AT91_MATRIX_RCB1 (1 << 1) /* Remap Command for AHB Master 1 (ARM926EJ-S Data Master) */
73
74#define AT91_MATRIX_SFR0 (AT91_MATRIX + 0x110) /* Special Function Register 0 */
75#define AT91_MATRIX_SFR1 (AT91_MATRIX + 0x114) /* Special Function Register 1 */
76#define AT91_MATRIX_SFR2 (AT91_MATRIX + 0x118) /* Special Function Register 2 */
77#define AT91_MATRIX_SFR3 (AT91_MATRIX + 0x11C) /* Special Function Register 3 */
78#define AT91_MATRIX_SFR4 (AT91_MATRIX + 0x120) /* Special Function Register 4 */
79#define AT91_MATRIX_SFR5 (AT91_MATRIX + 0x124) /* Special Function Register 5 */
80#define AT91_MATRIX_SFR6 (AT91_MATRIX + 0x128) /* Special Function Register 6 */
81#define AT91_MATRIX_SFR7 (AT91_MATRIX + 0x12C) /* Special Function Register 7 */
82#define AT91_MATRIX_SFR8 (AT91_MATRIX + 0x130) /* Special Function Register 8 */
83#define AT91_MATRIX_SFR9 (AT91_MATRIX + 0x134) /* Special Function Register 9 */
84#define AT91_MATRIX_SFR10 (AT91_MATRIX + 0x138) /* Special Function Register 10 */
85#define AT91_MATRIX_SFR11 (AT91_MATRIX + 0x13C) /* Special Function Register 11 */
86#define AT91_MATRIX_SFR12 (AT91_MATRIX + 0x140) /* Special Function Register 12 */
87#define AT91_MATRIX_SFR13 (AT91_MATRIX + 0x144) /* Special Function Register 13 */
88#define AT91_MATRIX_SFR14 (AT91_MATRIX + 0x148) /* Special Function Register 14 */
89#define AT91_MATRIX_SFR15 (AT91_MATRIX + 0x14C) /* Special Function Register 15 */
90
91
92/*
93 * The following registers / bits are not defined in the Datasheet (Revision A)
94 */
95
96#define AT91_MATRIX_TCR (AT91_MATRIX + 0x100) /* TCM Configuration Register */
97#define AT91_MATRIX_ITCM_SIZE (0xf << 0) /* Size of ITCM enabled memory block */
98#define AT91_MATRIX_ITCM_0 (0 << 0)
99#define AT91_MATRIX_ITCM_16 (5 << 0)
100#define AT91_MATRIX_ITCM_32 (6 << 0)
101#define AT91_MATRIX_ITCM_64 (7 << 0)
102#define AT91_MATRIX_DTCM_SIZE (0xf << 4) /* Size of DTCM enabled memory block */
103#define AT91_MATRIX_DTCM_0 (0 << 4)
104#define AT91_MATRIX_DTCM_16 (5 << 4)
105#define AT91_MATRIX_DTCM_32 (6 << 4)
106#define AT91_MATRIX_DTCM_64 (7 << 4)
107
108#define AT91_MATRIX_EBICSA (AT91_MATRIX + 0x11C) /* EBI Chip Select Assignment Register */
109#define AT91_MATRIX_CS1A (1 << 1) /* Chip Select 1 Assignment */
110#define AT91_MATRIX_CS1A_SMC (0 << 1)
111#define AT91_MATRIX_CS1A_SDRAMC (1 << 1)
112#define AT91_MATRIX_CS3A (1 << 3) /* Chip Select 3 Assignment */
113#define AT91_MATRIX_CS3A_SMC (0 << 3)
114#define AT91_MATRIX_CS3A_SMC_SMARTMEDIA (1 << 3)
115#define AT91_MATRIX_CS4A (1 << 4) /* Chip Select 4 Assignment */
116#define AT91_MATRIX_CS4A_SMC (0 << 4)
117#define AT91_MATRIX_CS4A_SMC_CF1 (1 << 4)
118#define AT91_MATRIX_CS5A (1 << 5) /* Chip Select 5 Assignment */
119#define AT91_MATRIX_CS5A_SMC (0 << 5)
120#define AT91_MATRIX_CS5A_SMC_CF2 (1 << 5)
121#define AT91_MATRIX_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
122
123#endif
diff --git a/arch/arm/mach-at91/include/mach/at91cap9.h b/arch/arm/mach-at91/include/mach/at91cap9.h
index 9c6af9737485..665993849a7b 100644
--- a/arch/arm/mach-at91/include/mach/at91cap9.h
+++ b/arch/arm/mach-at91/include/mach/at91cap9.h
@@ -20,8 +20,6 @@
20/* 20/*
21 * Peripheral identifiers/interrupts. 21 * Peripheral identifiers/interrupts.
22 */ 22 */
23#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
24#define AT91_ID_SYS 1 /* System Peripherals */
25#define AT91CAP9_ID_PIOABCD 2 /* Parallel IO Controller A, B, C and D */ 23#define AT91CAP9_ID_PIOABCD 2 /* Parallel IO Controller A, B, C and D */
26#define AT91CAP9_ID_MPB0 3 /* MP Block Peripheral 0 */ 24#define AT91CAP9_ID_MPB0 3 /* MP Block Peripheral 0 */
27#define AT91CAP9_ID_MPB1 4 /* MP Block Peripheral 1 */ 25#define AT91CAP9_ID_MPB1 4 /* MP Block Peripheral 1 */
@@ -123,6 +121,4 @@
123#define AT91CAP9_UDPHS_FIFO 0x00600000 /* USB High Speed Device Port */ 121#define AT91CAP9_UDPHS_FIFO 0x00600000 /* USB High Speed Device Port */
124#define AT91CAP9_UHP_BASE 0x00700000 /* USB Host controller */ 122#define AT91CAP9_UHP_BASE 0x00700000 /* USB Host controller */
125 123
126#define CONFIG_DRAM_BASE AT91_CHIPSELECT_6
127
128#endif 124#endif
diff --git a/arch/arm/mach-at91/include/mach/at91rm9200.h b/arch/arm/mach-at91/include/mach/at91rm9200.h
index 78983155a074..99e0f8d02d7b 100644
--- a/arch/arm/mach-at91/include/mach/at91rm9200.h
+++ b/arch/arm/mach-at91/include/mach/at91rm9200.h
@@ -19,8 +19,6 @@
19/* 19/*
20 * Peripheral identifiers/interrupts. 20 * Peripheral identifiers/interrupts.
21 */ 21 */
22#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
23#define AT91_ID_SYS 1 /* System Peripheral */
24#define AT91RM9200_ID_PIOA 2 /* Parallel IO Controller A */ 22#define AT91RM9200_ID_PIOA 2 /* Parallel IO Controller A */
25#define AT91RM9200_ID_PIOB 3 /* Parallel IO Controller B */ 23#define AT91RM9200_ID_PIOB 3 /* Parallel IO Controller B */
26#define AT91RM9200_ID_PIOC 4 /* Parallel IO Controller C */ 24#define AT91RM9200_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9260.h b/arch/arm/mach-at91/include/mach/at91sam9260.h
index 4e79036d3b80..8b6bf835cd73 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9260.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9260.h
@@ -20,8 +20,6 @@
20/* 20/*
21 * Peripheral identifiers/interrupts. 21 * Peripheral identifiers/interrupts.
22 */ 22 */
23#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
24#define AT91_ID_SYS 1 /* System Peripherals */
25#define AT91SAM9260_ID_PIOA 2 /* Parallel IO Controller A */ 23#define AT91SAM9260_ID_PIOA 2 /* Parallel IO Controller A */
26#define AT91SAM9260_ID_PIOB 3 /* Parallel IO Controller B */ 24#define AT91SAM9260_ID_PIOB 3 /* Parallel IO Controller B */
27#define AT91SAM9260_ID_PIOC 4 /* Parallel IO Controller C */ 25#define AT91SAM9260_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9261.h b/arch/arm/mach-at91/include/mach/at91sam9261.h
index 2b5618518129..eafbddaf523c 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9261.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9261.h
@@ -18,8 +18,6 @@
18/* 18/*
19 * Peripheral identifiers/interrupts. 19 * Peripheral identifiers/interrupts.
20 */ 20 */
21#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
22#define AT91_ID_SYS 1 /* System Peripherals */
23#define AT91SAM9261_ID_PIOA 2 /* Parallel IO Controller A */ 21#define AT91SAM9261_ID_PIOA 2 /* Parallel IO Controller A */
24#define AT91SAM9261_ID_PIOB 3 /* Parallel IO Controller B */ 22#define AT91SAM9261_ID_PIOB 3 /* Parallel IO Controller B */
25#define AT91SAM9261_ID_PIOC 4 /* Parallel IO Controller C */ 23#define AT91SAM9261_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9263.h b/arch/arm/mach-at91/include/mach/at91sam9263.h
index 2091f1e42d43..e2d348213a7b 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9263.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9263.h
@@ -18,8 +18,6 @@
18/* 18/*
19 * Peripheral identifiers/interrupts. 19 * Peripheral identifiers/interrupts.
20 */ 20 */
21#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
22#define AT91_ID_SYS 1 /* System Peripherals */
23#define AT91SAM9263_ID_PIOA 2 /* Parallel IO Controller A */ 21#define AT91SAM9263_ID_PIOA 2 /* Parallel IO Controller A */
24#define AT91SAM9263_ID_PIOB 3 /* Parallel IO Controller B */ 22#define AT91SAM9263_ID_PIOB 3 /* Parallel IO Controller B */
25#define AT91SAM9263_ID_PIOCDE 4 /* Parallel IO Controller C, D and E */ 23#define AT91SAM9263_ID_PIOCDE 4 /* Parallel IO Controller C, D and E */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9g45.h b/arch/arm/mach-at91/include/mach/at91sam9g45.h
index a526869aee37..659304aa73d9 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9g45.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9g45.h
@@ -18,8 +18,6 @@
18/* 18/*
19 * Peripheral identifiers/interrupts. 19 * Peripheral identifiers/interrupts.
20 */ 20 */
21#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
22#define AT91_ID_SYS 1 /* System Controller Interrupt */
23#define AT91SAM9G45_ID_PIOA 2 /* Parallel I/O Controller A */ 21#define AT91SAM9G45_ID_PIOA 2 /* Parallel I/O Controller A */
24#define AT91SAM9G45_ID_PIOB 3 /* Parallel I/O Controller B */ 22#define AT91SAM9G45_ID_PIOB 3 /* Parallel I/O Controller B */
25#define AT91SAM9G45_ID_PIOC 4 /* Parallel I/O Controller C */ 23#define AT91SAM9G45_ID_PIOC 4 /* Parallel I/O Controller C */
@@ -131,8 +129,6 @@
131#define AT91SAM9G45_EHCI_BASE 0x00800000 /* USB Host controller (EHCI) */ 129#define AT91SAM9G45_EHCI_BASE 0x00800000 /* USB Host controller (EHCI) */
132#define AT91SAM9G45_VDEC_BASE 0x00900000 /* Video Decoder Controller */ 130#define AT91SAM9G45_VDEC_BASE 0x00900000 /* Video Decoder Controller */
133 131
134#define CONFIG_DRAM_BASE AT91_CHIPSELECT_6
135
136#define CONSISTENT_DMA_SIZE SZ_4M 132#define CONSISTENT_DMA_SIZE SZ_4M
137 133
138/* 134/*
diff --git a/arch/arm/mach-at91/include/mach/at91sam9rl.h b/arch/arm/mach-at91/include/mach/at91sam9rl.h
index 87ba8517ad98..41dbbe61055c 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9rl.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9rl.h
@@ -17,8 +17,6 @@
17/* 17/*
18 * Peripheral identifiers/interrupts. 18 * Peripheral identifiers/interrupts.
19 */ 19 */
20#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
21#define AT91_ID_SYS 1 /* System Controller */
22#define AT91SAM9RL_ID_PIOA 2 /* Parallel IO Controller A */ 20#define AT91SAM9RL_ID_PIOA 2 /* Parallel IO Controller A */
23#define AT91SAM9RL_ID_PIOB 3 /* Parallel IO Controller B */ 21#define AT91SAM9RL_ID_PIOB 3 /* Parallel IO Controller B */
24#define AT91SAM9RL_ID_PIOC 4 /* Parallel IO Controller C */ 22#define AT91SAM9RL_ID_PIOC 4 /* Parallel IO Controller C */
diff --git a/arch/arm/mach-at91/include/mach/at91x40.h b/arch/arm/mach-at91/include/mach/at91x40.h
index 063ac44a0204..a152ff87e688 100644
--- a/arch/arm/mach-at91/include/mach/at91x40.h
+++ b/arch/arm/mach-at91/include/mach/at91x40.h
@@ -15,8 +15,6 @@
15/* 15/*
16 * IRQ list. 16 * IRQ list.
17 */ 17 */
18#define AT91_ID_FIQ 0 /* FIQ */
19#define AT91_ID_SYS 1 /* System Peripheral */
20#define AT91X40_ID_USART0 2 /* USART port 0 */ 18#define AT91X40_ID_USART0 2 /* USART port 0 */
21#define AT91X40_ID_USART1 3 /* USART port 1 */ 19#define AT91X40_ID_USART1 3 /* USART port 1 */
22#define AT91X40_ID_TC0 4 /* Timer/Counter 0 */ 20#define AT91X40_ID_TC0 4 /* Timer/Counter 0 */
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index 2b499eb343a1..ed544a0d5a1d 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -90,7 +90,7 @@ struct at91_eth_data {
90extern void __init at91_add_device_eth(struct at91_eth_data *data); 90extern void __init at91_add_device_eth(struct at91_eth_data *data);
91 91
92#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91SAM9G20) || defined(CONFIG_ARCH_AT91CAP9) \ 92#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91SAM9G20) || defined(CONFIG_ARCH_AT91CAP9) \
93 || defined(CONFIG_ARCH_AT91SAM9G45) || defined(CONFIG_ARCH_AT572D940HF) 93 || defined(CONFIG_ARCH_AT91SAM9G45)
94#define eth_platform_data at91_eth_data 94#define eth_platform_data at91_eth_data
95#endif 95#endif
96 96
@@ -140,6 +140,7 @@ extern void __init at91_set_serial_console(unsigned portnr);
140extern struct platform_device *atmel_default_console_device; 140extern struct platform_device *atmel_default_console_device;
141 141
142struct atmel_uart_data { 142struct atmel_uart_data {
143 int num; /* port num */
143 short use_dma_tx; /* use transmit DMA? */ 144 short use_dma_tx; /* use transmit DMA? */
144 short use_dma_rx; /* use receive DMA? */ 145 short use_dma_rx; /* use receive DMA? */
145 void __iomem *regs; /* virt. base address, if any */ 146 void __iomem *regs; /* virt. base address, if any */
@@ -203,9 +204,6 @@ extern void __init at91_init_leds(u8 cpu_led, u8 timer_led);
203extern void __init at91_gpio_leds(struct gpio_led *leds, int nr); 204extern void __init at91_gpio_leds(struct gpio_led *leds, int nr);
204extern void __init at91_pwm_leds(struct gpio_led *leds, int nr); 205extern void __init at91_pwm_leds(struct gpio_led *leds, int nr);
205 206
206 /* AT572D940HF DSP */
207extern void __init at91_add_device_mAgic(void);
208
209/* FIXME: this needs a better location, but gets stuff building again */ 207/* FIXME: this needs a better location, but gets stuff building again */
210extern int at91_suspend_entering_slow_clock(void); 208extern int at91_suspend_entering_slow_clock(void);
211 209
diff --git a/arch/arm/mach-at91/include/mach/clkdev.h b/arch/arm/mach-at91/include/mach/clkdev.h
new file mode 100644
index 000000000000..04b37a89801c
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/clkdev.h
@@ -0,0 +1,7 @@
1#ifndef __ASM_MACH_CLKDEV_H
2#define __ASM_MACH_CLKDEV_H
3
4#define __clk_get(clk) ({ 1; })
5#define __clk_put(clk) do { } while (0)
6
7#endif
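
The new mach/clkdev.h supplies the two hooks the generic clkdev code expects from every platform: __clk_get() is invoked when a clock is handed out and __clk_put() when it is released; returning 1 and doing nothing suits clocks that are never unregistered and hold no module reference. Roughly, the generic helpers use these hooks as in the sketch below (simplified: example_find_lookup() stands in for clkdev's internal table search, and locking is omitted):

/* Simplified sketch of how generic clkdev uses the per-mach hooks. */
#include <linux/clkdev.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk_lookup *example_find_lookup(const char *dev_id,
					      const char *con_id);

struct clk *clk_get(struct device *dev, const char *con_id)
{
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct clk_lookup *cl = example_find_lookup(dev_id, con_id);

	if (cl && __clk_get(cl->clk))		/* always "1" on AT91 */
		return cl->clk;
	return ERR_PTR(-ENOENT);
}

void clk_put(struct clk *clk)
{
	__clk_put(clk);				/* no-op on AT91 */
}
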
diff --git a/arch/arm/mach-at91/include/mach/cpu.h b/arch/arm/mach-at91/include/mach/cpu.h
index 0700f2125305..df966c2bc2d4 100644
--- a/arch/arm/mach-at91/include/mach/cpu.h
+++ b/arch/arm/mach-at91/include/mach/cpu.h
@@ -34,8 +34,6 @@
34#define ARCH_ID_AT91SAM9XE256 0x329a93a0 34#define ARCH_ID_AT91SAM9XE256 0x329a93a0
35#define ARCH_ID_AT91SAM9XE512 0x329aa3a0 35#define ARCH_ID_AT91SAM9XE512 0x329aa3a0
36 36
37#define ARCH_ID_AT572D940HF 0x0e0303e0
38
39#define ARCH_ID_AT91M40800 0x14080044 37#define ARCH_ID_AT91M40800 0x14080044
40#define ARCH_ID_AT91R40807 0x44080746 38#define ARCH_ID_AT91R40807 0x44080746
41#define ARCH_ID_AT91M40807 0x14080745 39#define ARCH_ID_AT91M40807 0x14080745
@@ -90,9 +88,16 @@ static inline unsigned long at91cap9_rev_identify(void)
90#endif 88#endif
91 89
92#ifdef CONFIG_ARCH_AT91RM9200 90#ifdef CONFIG_ARCH_AT91RM9200
91extern int rm9200_type;
92#define ARCH_REVISON_9200_BGA (0 << 0)
93#define ARCH_REVISON_9200_PQFP (1 << 0)
93#define cpu_is_at91rm9200() (at91_cpu_identify() == ARCH_ID_AT91RM9200) 94#define cpu_is_at91rm9200() (at91_cpu_identify() == ARCH_ID_AT91RM9200)
95#define cpu_is_at91rm9200_bga() (!cpu_is_at91rm9200_pqfp())
96#define cpu_is_at91rm9200_pqfp() (cpu_is_at91rm9200() && rm9200_type & ARCH_REVISON_9200_PQFP)
94#else 97#else
95#define cpu_is_at91rm9200() (0) 98#define cpu_is_at91rm9200() (0)
99#define cpu_is_at91rm9200_bga() (0)
100#define cpu_is_at91rm9200_pqfp() (0)
96#endif 101#endif
97 102
98#ifdef CONFIG_ARCH_AT91SAM9260 103#ifdef CONFIG_ARCH_AT91SAM9260
@@ -181,12 +186,6 @@ static inline unsigned long at91cap9_rev_identify(void)
181#define cpu_is_at91cap9_revC() (0) 186#define cpu_is_at91cap9_revC() (0)
182#endif 187#endif
183 188
184#ifdef CONFIG_ARCH_AT572D940HF
185#define cpu_is_at572d940hf() (at91_cpu_identify() == ARCH_ID_AT572D940HF)
186#else
187#define cpu_is_at572d940hf() (0)
188#endif
189
190/* 189/*
191 * Since this is ARM, we will never run on any AVR32 CPU. But these 190 * Since this is ARM, we will never run on any AVR32 CPU. But these
192 * definitions may reduce clutter in common drivers. 191 * definitions may reduce clutter in common drivers.
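
The RM9200 package type is no longer passed to at91rm9200_initialize(); boards call at91rm9200_set_type() early (as the YL-9200 hunk above does), and code that cares queries cpu_is_at91rm9200_pqfp() or cpu_is_at91rm9200_bga(). A small usage sketch; the GPIO bank count is only an example of package-dependent behaviour, not taken from the patch:

/* Sketch: package-dependent setup after at91rm9200_set_type(). */
#include <mach/cpu.h>

static int __init example_nr_gpio_banks(void)
{
	/* the PQFP package bonds out fewer PIO lines (illustrative) */
	if (cpu_is_at91rm9200_pqfp())
		return 3;
	return 4;
}
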
diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h
index 3d64a75e3ed5..1008b9fb5074 100644
--- a/arch/arm/mach-at91/include/mach/hardware.h
+++ b/arch/arm/mach-at91/include/mach/hardware.h
@@ -32,13 +32,17 @@
32#include <mach/at91cap9.h> 32#include <mach/at91cap9.h>
33#elif defined(CONFIG_ARCH_AT91X40) 33#elif defined(CONFIG_ARCH_AT91X40)
34#include <mach/at91x40.h> 34#include <mach/at91x40.h>
35#elif defined(CONFIG_ARCH_AT572D940HF)
36#include <mach/at572d940hf.h>
37#else 35#else
38#error "Unsupported AT91 processor" 36#error "Unsupported AT91 processor"
39#endif 37#endif
40 38
41 39
40/*
41 * Peripheral identifiers/interrupts.
42 */
43#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
44#define AT91_ID_SYS 1 /* System Peripherals */
45
42#ifdef CONFIG_MMU 46#ifdef CONFIG_MMU
43/* 47/*
44 * Remap the peripherals from address 0xFFF78000 .. 0xFFFFFFFF 48 * Remap the peripherals from address 0xFFF78000 .. 0xFFFFFFFF
@@ -82,13 +86,6 @@
82#define AT91_CHIPSELECT_6 0x70000000 86#define AT91_CHIPSELECT_6 0x70000000
83#define AT91_CHIPSELECT_7 0x80000000 87#define AT91_CHIPSELECT_7 0x80000000
84 88
85/* SDRAM */
86#ifdef CONFIG_DRAM_BASE
87#define AT91_SDRAM_BASE CONFIG_DRAM_BASE
88#else
89#define AT91_SDRAM_BASE AT91_CHIPSELECT_1
90#endif
91
92/* Clocks */ 89/* Clocks */
93#define AT91_SLOW_CLOCK 32768 /* slow clock */ 90#define AT91_SLOW_CLOCK 32768 /* slow clock */
94 91
diff --git a/arch/arm/mach-at91/include/mach/memory.h b/arch/arm/mach-at91/include/mach/memory.h
index c2cfe5040642..401c207f2f39 100644
--- a/arch/arm/mach-at91/include/mach/memory.h
+++ b/arch/arm/mach-at91/include/mach/memory.h
@@ -23,6 +23,4 @@
23 23
24#include <mach/hardware.h> 24#include <mach/hardware.h>
25 25
26#define PLAT_PHYS_OFFSET (AT91_SDRAM_BASE)
27
28#endif 26#endif
diff --git a/arch/arm/mach-at91/include/mach/stamp9g20.h b/arch/arm/mach-at91/include/mach/stamp9g20.h
index 6120f9c46d59..f62c0abca4b4 100644
--- a/arch/arm/mach-at91/include/mach/stamp9g20.h
+++ b/arch/arm/mach-at91/include/mach/stamp9g20.h
@@ -1,7 +1,7 @@
1#ifndef __MACH_STAMP9G20_H 1#ifndef __MACH_STAMP9G20_H
2#define __MACH_STAMP9G20_H 2#define __MACH_STAMP9G20_H
3 3
4void stamp9g20_map_io(void); 4void stamp9g20_init_early(void);
5void stamp9g20_board_init(void); 5void stamp9g20_board_init(void);
6 6
7#endif 7#endif
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h
new file mode 100644
index 000000000000..b855ee75f72c
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/system_rev.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
3 *
4 * Under GPLv2 only
5 */
6
7#ifndef __ARCH_SYSTEM_REV_H__
8#define __ARCH_SYSTEM_REV_H__
9
10/*
11 * Board revision encoding
12 * is machine specific;
13 * bits 16-31 are reserved for generic AT91 information.
14 *
15 * bit 31:
16 * 0 => nand 16 bit
17 * 1 => nand 8 bit
18 */
19#define BOARD_HAVE_NAND_8BIT (1 << 31)
20static inline int board_have_nand_8bit(void)
21{
22 return system_rev & BOARD_HAVE_NAND_8BIT;
23}
24
25#endif /* __ARCH_SYSTEM_REV_H__ */
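
board_have_nand_8bit() keys off bit 31 of the system_rev word passed in by the bootloader (ATAG_REVISION), so a single board file can pick the NAND bus width at run time. A hedged sketch of the intended use in board code; nand_data stands in for the board's existing, fully populated atmel_nand_data:

/* Sketch: choosing the NAND bus width from the board revision bit. */
#include <mach/board.h>
#include <mach/system_rev.h>

static struct atmel_nand_data nand_data;	/* normally fully populated */

static void __init example_add_device_nand(void)
{
	/* bit 31 set => 8-bit NAND, otherwise 16-bit */
	nand_data.bus_width_16 = board_have_nand_8bit() ? 0 : 1;

	at91_add_device_nand(&nand_data);
}
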
diff --git a/arch/arm/mach-at91/include/mach/timex.h b/arch/arm/mach-at91/include/mach/timex.h
index 05a6e8af80c4..31ac2d97f14c 100644
--- a/arch/arm/mach-at91/include/mach/timex.h
+++ b/arch/arm/mach-at91/include/mach/timex.h
@@ -82,11 +82,6 @@
82#define AT91X40_MASTER_CLOCK 40000000 82#define AT91X40_MASTER_CLOCK 40000000
83#define CLOCK_TICK_RATE (AT91X40_MASTER_CLOCK) 83#define CLOCK_TICK_RATE (AT91X40_MASTER_CLOCK)
84 84
85#elif defined(CONFIG_ARCH_AT572D940HF)
86
87#define AT572D940HF_MASTER_CLOCK 80000000
88#define CLOCK_TICK_RATE (AT572D940HF_MASTER_CLOCK/16)
89
90#endif 85#endif
91 86
92#endif 87#endif
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index b95b9196deed..133aac405853 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -1055,7 +1055,7 @@ int da850_register_pm(struct platform_device *pdev)
1055 if (!pdata->cpupll_reg_base) 1055 if (!pdata->cpupll_reg_base)
1056 return -ENOMEM; 1056 return -ENOMEM;
1057 1057
1058 pdata->ddrpll_reg_base = ioremap(DA8XX_PLL1_BASE, SZ_4K); 1058 pdata->ddrpll_reg_base = ioremap(DA850_PLL1_BASE, SZ_4K);
1059 if (!pdata->ddrpll_reg_base) { 1059 if (!pdata->ddrpll_reg_base) {
1060 ret = -ENOMEM; 1060 ret = -ENOMEM;
1061 goto no_ddrpll_mem; 1061 goto no_ddrpll_mem;
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 58a02dc7b15a..4e66881c7aee 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -24,23 +24,25 @@
24#include "clock.h" 24#include "clock.h"
25 25
26#define DA8XX_TPCC_BASE 0x01c00000 26#define DA8XX_TPCC_BASE 0x01c00000
27#define DA850_MMCSD1_BASE 0x01e1b000
28#define DA850_TPCC1_BASE 0x01e30000
29#define DA8XX_TPTC0_BASE 0x01c08000 27#define DA8XX_TPTC0_BASE 0x01c08000
30#define DA8XX_TPTC1_BASE 0x01c08400 28#define DA8XX_TPTC1_BASE 0x01c08400
31#define DA850_TPTC2_BASE 0x01e38000
32#define DA8XX_WDOG_BASE 0x01c21000 /* DA8XX_TIMER64P1_BASE */ 29#define DA8XX_WDOG_BASE 0x01c21000 /* DA8XX_TIMER64P1_BASE */
33#define DA8XX_I2C0_BASE 0x01c22000 30#define DA8XX_I2C0_BASE 0x01c22000
34#define DA8XX_RTC_BASE 0x01C23000 31#define DA8XX_RTC_BASE 0x01c23000
32#define DA8XX_MMCSD0_BASE 0x01c40000
33#define DA8XX_SPI0_BASE 0x01c41000
34#define DA830_SPI1_BASE 0x01e12000
35#define DA8XX_LCD_CNTRL_BASE 0x01e13000
36#define DA850_MMCSD1_BASE 0x01e1b000
35#define DA8XX_EMAC_CPPI_PORT_BASE 0x01e20000 37#define DA8XX_EMAC_CPPI_PORT_BASE 0x01e20000
36#define DA8XX_EMAC_CPGMACSS_BASE 0x01e22000 38#define DA8XX_EMAC_CPGMACSS_BASE 0x01e22000
37#define DA8XX_EMAC_CPGMAC_BASE 0x01e23000 39#define DA8XX_EMAC_CPGMAC_BASE 0x01e23000
38#define DA8XX_EMAC_MDIO_BASE 0x01e24000 40#define DA8XX_EMAC_MDIO_BASE 0x01e24000
39#define DA8XX_GPIO_BASE 0x01e26000
40#define DA8XX_I2C1_BASE 0x01e28000 41#define DA8XX_I2C1_BASE 0x01e28000
41#define DA8XX_SPI0_BASE 0x01c41000 42#define DA850_TPCC1_BASE 0x01e30000
42#define DA830_SPI1_BASE 0x01e12000 43#define DA850_TPTC2_BASE 0x01e38000
43#define DA850_SPI1_BASE 0x01f0e000 44#define DA850_SPI1_BASE 0x01f0e000
45#define DA8XX_DDR2_CTL_BASE 0xb0000000
44 46
45#define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000 47#define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000
46#define DA8XX_EMAC_MOD_REG_OFFSET 0x2000 48#define DA8XX_EMAC_MOD_REG_OFFSET 0x2000
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 22ebc64bc9d9..8f4f736aa267 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -33,6 +33,9 @@
33#define DM365_MMCSD0_BASE 0x01D11000 33#define DM365_MMCSD0_BASE 0x01D11000
34#define DM365_MMCSD1_BASE 0x01D00000 34#define DM365_MMCSD1_BASE 0x01D00000
35 35
36/* System control register offsets */
37#define DM64XX_VDD3P3V_PWDN 0x48
38
36static struct resource i2c_resources[] = { 39static struct resource i2c_resources[] = {
37 { 40 {
38 .start = DAVINCI_I2C_BASE, 41 .start = DAVINCI_I2C_BASE,
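
DM64XX_VDD3P3V_PWDN moves out of mach/hardware.h and into devices.c, apparently its only user: it is an offset into the system control module used to power up the 3.3V I/O cells before MMCSD is enabled. The existing user does roughly the equivalent of this simplified sketch (not an addition made by the patch):

	/* power on the 3.3V I/O cells (DM644x MMC setup, simplified) */
	void __iomem *sysmod = IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE);

	__raw_writel(0, sysmod + DM64XX_VDD3P3V_PWDN);
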
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index e4fc1af8500e..ad64da713fc8 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -64,13 +64,9 @@ extern unsigned int da850_max_speed;
64#define DA8XX_TIMER64P1_BASE 0x01c21000 64#define DA8XX_TIMER64P1_BASE 0x01c21000
65#define DA8XX_GPIO_BASE 0x01e26000 65#define DA8XX_GPIO_BASE 0x01e26000
66#define DA8XX_PSC1_BASE 0x01e27000 66#define DA8XX_PSC1_BASE 0x01e27000
67#define DA8XX_LCD_CNTRL_BASE 0x01e13000
68#define DA8XX_PLL1_BASE 0x01e1a000
69#define DA8XX_MMCSD0_BASE 0x01c40000
70#define DA8XX_AEMIF_CS2_BASE 0x60000000 67#define DA8XX_AEMIF_CS2_BASE 0x60000000
71#define DA8XX_AEMIF_CS3_BASE 0x62000000 68#define DA8XX_AEMIF_CS3_BASE 0x62000000
72#define DA8XX_AEMIF_CTL_BASE 0x68000000 69#define DA8XX_AEMIF_CTL_BASE 0x68000000
73#define DA8XX_DDR2_CTL_BASE 0xb0000000
74#define DA8XX_ARM_RAM_BASE 0xffff0000 70#define DA8XX_ARM_RAM_BASE 0xffff0000
75 71
76void __init da830_init(void); 72void __init da830_init(void);
diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h
index c45ba1f62a11..414e0b93e741 100644
--- a/arch/arm/mach-davinci/include/mach/hardware.h
+++ b/arch/arm/mach-davinci/include/mach/hardware.h
@@ -21,9 +21,6 @@
21 */ 21 */
22#define DAVINCI_SYSTEM_MODULE_BASE 0x01C40000 22#define DAVINCI_SYSTEM_MODULE_BASE 0x01C40000
23 23
24/* System control register offsets */
25#define DM64XX_VDD3P3V_PWDN 0x48
26
27/* 24/*
28 * I/O mapping 25 * I/O mapping
29 */ 26 */
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index 805196207ce8..b92c1e557145 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -169,9 +169,11 @@ config MACH_NURI
169 select S3C_DEV_HSMMC2 169 select S3C_DEV_HSMMC2
170 select S3C_DEV_HSMMC3 170 select S3C_DEV_HSMMC3
171 select S3C_DEV_I2C1 171 select S3C_DEV_I2C1
172 select S3C_DEV_I2C3
172 select S3C_DEV_I2C5 173 select S3C_DEV_I2C5
173 select S5P_DEV_USB_EHCI 174 select S5P_DEV_USB_EHCI
174 select EXYNOS4_SETUP_I2C1 175 select EXYNOS4_SETUP_I2C1
176 select EXYNOS4_SETUP_I2C3
175 select EXYNOS4_SETUP_I2C5 177 select EXYNOS4_SETUP_I2C5
176 select EXYNOS4_SETUP_SDHCI 178 select EXYNOS4_SETUP_SDHCI
177 select SAMSUNG_DEV_PWM 179 select SAMSUNG_DEV_PWM
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile
index 97837e75afa3..a9bb94fabaa7 100644
--- a/arch/arm/mach-exynos4/Makefile
+++ b/arch/arm/mach-exynos4/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_CPU_EXYNOS4210) += cpu.o init.o clock.o irq-combiner.o
16obj-$(CONFIG_CPU_EXYNOS4210) += setup-i2c0.o irq-eint.o dma.o 16obj-$(CONFIG_CPU_EXYNOS4210) += setup-i2c0.o irq-eint.o dma.o
17obj-$(CONFIG_PM) += pm.o sleep.o 17obj-$(CONFIG_PM) += pm.o sleep.o
18obj-$(CONFIG_CPU_FREQ) += cpufreq.o 18obj-$(CONFIG_CPU_FREQ) += cpufreq.o
19obj-$(CONFIG_CPU_IDLE) += cpuidle.o
19 20
20obj-$(CONFIG_SMP) += platsmp.o headsmp.o 21obj-$(CONFIG_SMP) += platsmp.o headsmp.o
21 22
diff --git a/arch/arm/mach-exynos4/cpuidle.c b/arch/arm/mach-exynos4/cpuidle.c
new file mode 100644
index 000000000000..bf7e96f2793a
--- /dev/null
+++ b/arch/arm/mach-exynos4/cpuidle.c
@@ -0,0 +1,86 @@
1/* linux/arch/arm/mach-exynos4/cpuidle.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9*/
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/cpuidle.h>
14#include <linux/io.h>
15
16#include <asm/proc-fns.h>
17
18static int exynos4_enter_idle(struct cpuidle_device *dev,
19 struct cpuidle_state *state);
20
21static struct cpuidle_state exynos4_cpuidle_set[] = {
22 [0] = {
23 .enter = exynos4_enter_idle,
24 .exit_latency = 1,
25 .target_residency = 100000,
26 .flags = CPUIDLE_FLAG_TIME_VALID,
27 .name = "IDLE",
28 .desc = "ARM clock gating(WFI)",
29 },
30};
31
32static DEFINE_PER_CPU(struct cpuidle_device, exynos4_cpuidle_device);
33
34static struct cpuidle_driver exynos4_idle_driver = {
35 .name = "exynos4_idle",
36 .owner = THIS_MODULE,
37};
38
39static int exynos4_enter_idle(struct cpuidle_device *dev,
40 struct cpuidle_state *state)
41{
42 struct timeval before, after;
43 int idle_time;
44
45 local_irq_disable();
46 do_gettimeofday(&before);
47
48 cpu_do_idle();
49
50 do_gettimeofday(&after);
51 local_irq_enable();
52 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
53 (after.tv_usec - before.tv_usec);
54
55 return idle_time;
56}
57
58static int __init exynos4_init_cpuidle(void)
59{
60 int i, max_cpuidle_state, cpu_id;
61 struct cpuidle_device *device;
62
63 cpuidle_register_driver(&exynos4_idle_driver);
64
65 for_each_cpu(cpu_id, cpu_online_mask) {
66 device = &per_cpu(exynos4_cpuidle_device, cpu_id);
67 device->cpu = cpu_id;
68
69 device->state_count = (sizeof(exynos4_cpuidle_set) /
70 sizeof(struct cpuidle_state));
71
72 max_cpuidle_state = device->state_count;
73
74 for (i = 0; i < max_cpuidle_state; i++) {
75 memcpy(&device->states[i], &exynos4_cpuidle_set[i],
76 sizeof(struct cpuidle_state));
77 }
78
79 if (cpuidle_register_device(device)) {
 80 printk(KERN_ERR "CPUidle register device failed\n");
81 return -EIO;
82 }
83 }
84 return 0;
85}
86device_initcall(exynos4_init_cpuidle);
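
The new exynos4 cpuidle driver registers a single WFI-based state for every online CPU; exynos4_enter_idle() brackets cpu_do_idle() with gettimeofday calls and returns the elapsed microseconds as the time spent in the state. Adding a deeper state later only means appending an entry to exynos4_cpuidle_set[], for instance as sketched below; the exynos4_enter_lowpower callback, the state name and the latency figure are purely illustrative:

	/* Sketch: how a second, deeper state could be slotted in later. */
	[1] = {
		.enter			= exynos4_enter_lowpower,	/* hypothetical callback */
		.exit_latency		= 300,
		.target_residency	= 100000,
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.name			= "LOW_POWER",
		.desc			= "ARM power down (illustrative)",
	},
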
diff --git a/arch/arm/mach-exynos4/mach-nuri.c b/arch/arm/mach-exynos4/mach-nuri.c
index bb5d12f43af8..642702bb5b12 100644
--- a/arch/arm/mach-exynos4/mach-nuri.c
+++ b/arch/arm/mach-exynos4/mach-nuri.c
@@ -12,6 +12,7 @@
12#include <linux/serial_core.h> 12#include <linux/serial_core.h>
13#include <linux/input.h> 13#include <linux/input.h>
14#include <linux/i2c.h> 14#include <linux/i2c.h>
15#include <linux/i2c/atmel_mxt_ts.h>
15#include <linux/gpio_keys.h> 16#include <linux/gpio_keys.h>
16#include <linux/gpio.h> 17#include <linux/gpio.h>
17#include <linux/regulator/machine.h> 18#include <linux/regulator/machine.h>
@@ -32,6 +33,8 @@
32#include <plat/sdhci.h> 33#include <plat/sdhci.h>
33#include <plat/ehci.h> 34#include <plat/ehci.h>
34#include <plat/clock.h> 35#include <plat/clock.h>
36#include <plat/gpio-cfg.h>
37#include <plat/iic.h>
35 38
36#include <mach/map.h> 39#include <mach/map.h>
37 40
@@ -259,6 +262,88 @@ static struct i2c_board_info i2c1_devs[] __initdata = {
259 /* Gyro, To be updated */ 262 /* Gyro, To be updated */
260}; 263};
261 264
265/* TSP */
266static u8 mxt_init_vals[] = {
267 /* MXT_GEN_COMMAND(6) */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
269 /* MXT_GEN_POWER(7) */
270 0x20, 0xff, 0x32,
271 /* MXT_GEN_ACQUIRE(8) */
272 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
273 /* MXT_TOUCH_MULTI(9) */
274 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
275 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
277 0x00,
278 /* MXT_TOUCH_KEYARRAY(15) */
279 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
280 0x00,
281 /* MXT_SPT_GPIOPWM(19) */
282 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
284 /* MXT_PROCI_GRIPFACE(20) */
285 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
286 0x0f, 0x0a,
287 /* MXT_PROCG_NOISE(22) */
288 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
289 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
290 /* MXT_TOUCH_PROXIMITY(23) */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
292 0x00, 0x00, 0x00, 0x00, 0x00,
293 /* MXT_PROCI_ONETOUCH(24) */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
296 /* MXT_SPT_SELFTEST(25) */
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
298 0x00, 0x00, 0x00, 0x00,
299 /* MXT_PROCI_TWOTOUCH(27) */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
301 /* MXT_SPT_CTECONFIG(28) */
302 0x00, 0x00, 0x02, 0x08, 0x10, 0x00,
303};
304
305static struct mxt_platform_data mxt_platform_data = {
306 .config = mxt_init_vals,
307 .config_length = ARRAY_SIZE(mxt_init_vals),
308
309 .x_line = 18,
310 .y_line = 11,
311 .x_size = 1024,
312 .y_size = 600,
313 .blen = 0x1,
314 .threshold = 0x28,
315 .voltage = 2800000, /* 2.8V */
316 .orient = MXT_DIAGONAL_COUNTER,
317 .irqflags = IRQF_TRIGGER_FALLING,
318};
319
320static struct s3c2410_platform_i2c i2c3_data __initdata = {
321 .flags = 0,
322 .bus_num = 3,
323 .slave_addr = 0x10,
324 .frequency = 400 * 1000,
325 .sda_delay = 100,
326};
327
328static struct i2c_board_info i2c3_devs[] __initdata = {
329 {
330 I2C_BOARD_INFO("atmel_mxt_ts", 0x4a),
331 .platform_data = &mxt_platform_data,
332 .irq = IRQ_EINT(4),
333 },
334};
335
336static void __init nuri_tsp_init(void)
337{
338 int gpio;
339
340 /* TOUCH_INT: XEINT_4 */
341 gpio = EXYNOS4_GPX0(4);
342 gpio_request(gpio, "TOUCH_INT");
343 s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(0xf));
344 s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
345}
346
262/* GPIO I2C 5 (PMIC) */ 347/* GPIO I2C 5 (PMIC) */
263static struct i2c_board_info i2c5_devs[] __initdata = { 348static struct i2c_board_info i2c5_devs[] __initdata = {
264 /* max8997, To be updated */ 349 /* max8997, To be updated */
@@ -283,6 +368,7 @@ static struct platform_device *nuri_devices[] __initdata = {
283 &s3c_device_wdt, 368 &s3c_device_wdt,
284 &s3c_device_timer[0], 369 &s3c_device_timer[0],
285 &s5p_device_ehci, 370 &s5p_device_ehci,
371 &s3c_device_i2c3,
286 372
287 /* NURI Devices */ 373 /* NURI Devices */
288 &nuri_gpio_keys, 374 &nuri_gpio_keys,
@@ -300,8 +386,11 @@ static void __init nuri_map_io(void)
300static void __init nuri_machine_init(void) 386static void __init nuri_machine_init(void)
301{ 387{
302 nuri_sdhci_init(); 388 nuri_sdhci_init();
389 nuri_tsp_init();
303 390
304 i2c_register_board_info(1, i2c1_devs, ARRAY_SIZE(i2c1_devs)); 391 i2c_register_board_info(1, i2c1_devs, ARRAY_SIZE(i2c1_devs));
392 s3c_i2c3_set_platdata(&i2c3_data);
393 i2c_register_board_info(3, i2c3_devs, ARRAY_SIZE(i2c3_devs));
305 i2c_register_board_info(5, i2c5_devs, ARRAY_SIZE(i2c5_devs)); 394 i2c_register_board_info(5, i2c5_devs, ARRAY_SIZE(i2c5_devs));
306 395
307 nuri_ehci_init(); 396 nuri_ehci_init();
diff --git a/arch/arm/mach-gemini/board-wbd111.c b/arch/arm/mach-gemini/board-wbd111.c
index af7b68a6b258..88cc422ee444 100644
--- a/arch/arm/mach-gemini/board-wbd111.c
+++ b/arch/arm/mach-gemini/board-wbd111.c
@@ -84,7 +84,6 @@ static struct sys_timer wbd111_timer = {
84 .init = gemini_timer_init, 84 .init = gemini_timer_init,
85}; 85};
86 86
87#ifdef CONFIG_MTD_PARTITIONS
88static struct mtd_partition wbd111_partitions[] = { 87static struct mtd_partition wbd111_partitions[] = {
89 { 88 {
90 .name = "RedBoot", 89 .name = "RedBoot",
@@ -116,11 +115,7 @@ static struct mtd_partition wbd111_partitions[] = {
116 .mask_flags = MTD_WRITEABLE, 115 .mask_flags = MTD_WRITEABLE,
117 } 116 }
118}; 117};
119#define wbd111_num_partitions ARRAY_SIZE(wbd111_partitions) 118#define wbd111_num_partitions ARRAY_SIZE(wbd111_partitions)
120#else
121#define wbd111_partitions NULL
122#define wbd111_num_partitions 0
123#endif /* CONFIG_MTD_PARTITIONS */
124 119
125static void __init wbd111_init(void) 120static void __init wbd111_init(void)
126{ 121{
diff --git a/arch/arm/mach-gemini/board-wbd222.c b/arch/arm/mach-gemini/board-wbd222.c
index 99e5bbecf923..3a220347bc88 100644
--- a/arch/arm/mach-gemini/board-wbd222.c
+++ b/arch/arm/mach-gemini/board-wbd222.c
@@ -84,7 +84,6 @@ static struct sys_timer wbd222_timer = {
 	.init	= gemini_timer_init,
 };
 
-#ifdef CONFIG_MTD_PARTITIONS
 static struct mtd_partition wbd222_partitions[] = {
 	{
 		.name		= "RedBoot",
@@ -116,11 +115,7 @@ static struct mtd_partition wbd222_partitions[] = {
 		.mask_flags	= MTD_WRITEABLE,
 	}
 };
 #define wbd222_num_partitions	ARRAY_SIZE(wbd222_partitions)
-#else
-#define wbd222_partitions	NULL
-#define wbd222_num_partitions	0
-#endif /* CONFIG_MTD_PARTITIONS */
 
 static void __init wbd222_init(void)
 {
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 140783386785..dca4f7f9f4f7 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -60,7 +60,6 @@ static struct platform_device ixdp425_flash = {
 #if defined(CONFIG_MTD_NAND_PLATFORM) || \
     defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
 
-#ifdef CONFIG_MTD_PARTITIONS
 const char *part_probes[] = { "cmdlinepart", NULL };
 
 static struct mtd_partition ixdp425_partitions[] = {
@@ -74,7 +73,6 @@ static struct mtd_partition ixdp425_partitions[] = {
 		.size	= MTDPART_SIZ_FULL
 	},
 };
-#endif
 
 static void
 ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
@@ -103,11 +101,9 @@ static struct platform_nand_data ixdp425_flash_nand_data = {
 		.nr_chips	= 1,
 		.chip_delay	= 30,
 		.options	= NAND_NO_AUTOINCR,
-#ifdef CONFIG_MTD_PARTITIONS
 		.part_probe_types = part_probes,
 		.partitions	= ixdp425_partitions,
 		.nr_partitions	= ARRAY_SIZE(ixdp425_partitions),
-#endif
 	},
 	.ctrl = {
 		.cmd_ctrl	= ixdp425_flash_nand_cmd_ctrl
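
Note on the wbd111, wbd222 and ixdp425 hunks above: partition support is no longer an optional MTD feature, so the CONFIG_MTD_PARTITIONS guards and their NULL/0 fallback definitions are dropped and the partition tables are referenced unconditionally. A minimal sketch of the resulting board-file pattern; the board name and layout below are made up for illustration and are not taken from this series:

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Hypothetical layout -- illustrative only. */
static struct mtd_partition example_partitions[] = {
	{
		.name		= "bootloader",
		.offset		= 0,
		.size		= 0x00040000,		/* 256 KiB */
		.mask_flags	= MTD_WRITEABLE,	/* read-only */
	}, {
		.name		= "rootfs",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,
	},
};

/* No #ifdef needed any more: the table and its size are always valid. */
#define example_num_partitions	ARRAY_SIZE(example_partitions)
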
diff --git a/arch/arm/mach-netx/fb.c b/arch/arm/mach-netx/fb.c
index 5b84bcd30271..b9913234bbf6 100644
--- a/arch/arm/mach-netx/fb.c
+++ b/arch/arm/mach-netx/fb.c
@@ -103,7 +103,6 @@ static struct amba_device fb_device = {
 		.flags	= IORESOURCE_MEM,
 	},
 	.irq	= { NETX_IRQ_LCD, NO_IRQ },
-	.periphid = 0x10112400,
 };
 
 int netx_fb_init(struct clcd_board *board, struct clcd_panel *panel)
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 2b00f72e8e36..f6247e71a194 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -23,6 +23,7 @@
 #include <linux/gpio.h>
 #include <linux/gpio_keys.h>
 #include <linux/mmc/host.h>
+#include <linux/power/isp1704_charger.h>
 
 #include <plat/mcspi.h>
 #include <plat/board.h>
@@ -53,6 +54,8 @@
 #define RX51_FMTX_RESET_GPIO	163
 #define RX51_FMTX_IRQ		53
 
+#define RX51_USB_TRANSCEIVER_RST_GPIO	67
+
 /* list all spi devices here */
 enum {
 	RX51_SPI_WL1251,
@@ -111,10 +114,30 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
 	},
 };
 
+static void rx51_charger_set_power(bool on)
+{
+	gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, on);
+}
+
+static struct isp1704_charger_data rx51_charger_data = {
+	.set_power	= rx51_charger_set_power,
+};
+
 static struct platform_device rx51_charger_device = {
 	.name	= "isp1704_charger",
+	.dev	= {
+		.platform_data = &rx51_charger_data,
+	},
 };
 
+static void __init rx51_charger_init(void)
+{
+	WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
+		GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+
+	platform_device_register(&rx51_charger_device);
+}
+
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
 
 #define RX51_GPIO_CAMERA_LENS_COVER 110
@@ -961,6 +984,6 @@ void __init rx51_peripherals_init(void)
 	if (partition)
 		omap2_hsmmc_init(mmc);
 
-	platform_device_register(&rx51_charger_device);
+	rx51_charger_init();
 }
 
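
The RX-51 hunk above stops registering a bare isp1704 platform device and instead requests the transceiver reset GPIO once at init and hands the driver a set_power() callback through platform data. A short sketch of how such a callback might be consumed; the driver-side helper below is illustrative and assumes only the isp1704_charger_data layout visible in the hunk:

#include <linux/gpio.h>
#include <linux/power/isp1704_charger.h>

/* Driver side (illustrative): power the transceiver before touching it. */
static void example_charger_power_up(struct isp1704_charger_data *pdata)
{
	if (pdata && pdata->set_power)
		pdata->set_power(true);	/* board callback drives the reset GPIO */
}
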
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 2fc9f94cdd29..cd19309fd3b8 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -153,7 +153,6 @@ config MACH_XCEP
 	bool "Iskratel Electronics XCEP"
 	select PXA25x
 	select MTD
-	select MTD_PARTITIONS
 	select MTD_PHYSMAP
 	select MTD_CFI_INTELEXT
 	select MTD_CFI
diff --git a/arch/arm/mach-s3c2410/mach-amlm5900.c b/arch/arm/mach-s3c2410/mach-amlm5900.c
index 44440cbd7620..dabc141243f3 100644
--- a/arch/arm/mach-s3c2410/mach-amlm5900.c
+++ b/arch/arm/mach-s3c2410/mach-amlm5900.c
@@ -58,8 +58,6 @@
 #include <plat/cpu.h>
 #include <plat/gpio-cfg.h>
 
-#ifdef CONFIG_MTD_PARTITIONS
-
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/map.h>
@@ -113,7 +111,6 @@ static struct platform_device amlm5900_device_nor = {
 	.num_resources	= 1,
 	.resource	= &amlm5900_nor_resource,
 };
-#endif
 
 static struct map_desc amlm5900_iodesc[] __initdata = {
 };
@@ -158,9 +155,7 @@ static struct platform_device *amlm5900_devices[] __initdata = {
 	&s3c_device_rtc,
 	&s3c_device_usbgadget,
 	&s3c_device_sdi,
-#ifdef CONFIG_MTD_PARTITIONS
 	&amlm5900_device_nor,
-#endif
 };
 
 static void __init amlm5900_map_io(void)
diff --git a/arch/arm/mach-s3c2410/mach-tct_hammer.c b/arch/arm/mach-s3c2410/mach-tct_hammer.c
index a15d0621c22f..43c2b831b9e8 100644
--- a/arch/arm/mach-s3c2410/mach-tct_hammer.c
+++ b/arch/arm/mach-s3c2410/mach-tct_hammer.c
@@ -49,8 +49,6 @@
 #include <plat/devs.h>
 #include <plat/cpu.h>
 
-#ifdef CONFIG_MTD_PARTITIONS
-
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/map.h>
@@ -91,8 +89,6 @@ static struct platform_device tct_hammer_device_nor = {
 	.resource	= &tct_hammer_nor_resource,
 };
 
-#endif
-
 static struct map_desc tct_hammer_iodesc[] __initdata = {
 };
 
@@ -133,9 +129,7 @@ static struct platform_device *tct_hammer_devices[] __initdata = {
 	&s3c_device_rtc,
 	&s3c_device_usbgadget,
 	&s3c_device_sdi,
-#ifdef CONFIG_MTD_PARTITIONS
 	&tct_hammer_device_nor,
-#endif
 };
 
 static void __init tct_hammer_map_io(void)
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index 405e62128917..82db072cb836 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -16,7 +16,6 @@
 
 #include <mach/dma.h>
 #include <mach/map.h>
-#include <mach/gpio-bank-c.h>
 #include <mach/spi-clocks.h>
 #include <mach/irqs.h>
 
@@ -40,23 +39,15 @@ static char *spi_src_clks[] = {
  */
 static int s3c64xx_spi_cfg_gpio(struct platform_device *pdev)
 {
+	unsigned int base;
+
 	switch (pdev->id) {
 	case 0:
-		s3c_gpio_cfgpin(S3C64XX_GPC(0), S3C64XX_GPC0_SPI_MISO0);
-		s3c_gpio_cfgpin(S3C64XX_GPC(1), S3C64XX_GPC1_SPI_CLKO);
-		s3c_gpio_cfgpin(S3C64XX_GPC(2), S3C64XX_GPC2_SPI_MOSIO);
-		s3c_gpio_setpull(S3C64XX_GPC(0), S3C_GPIO_PULL_UP);
-		s3c_gpio_setpull(S3C64XX_GPC(1), S3C_GPIO_PULL_UP);
-		s3c_gpio_setpull(S3C64XX_GPC(2), S3C_GPIO_PULL_UP);
+		base = S3C64XX_GPC(0);
 		break;
 
 	case 1:
-		s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C64XX_GPC4_SPI_MISO1);
-		s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C64XX_GPC5_SPI_CLK1);
-		s3c_gpio_cfgpin(S3C64XX_GPC(6), S3C64XX_GPC6_SPI_MOSI1);
-		s3c_gpio_setpull(S3C64XX_GPC(4), S3C_GPIO_PULL_UP);
-		s3c_gpio_setpull(S3C64XX_GPC(5), S3C_GPIO_PULL_UP);
-		s3c_gpio_setpull(S3C64XX_GPC(6), S3C_GPIO_PULL_UP);
+		base = S3C64XX_GPC(4);
 		break;
 
 	default:
@@ -64,6 +55,9 @@ static int s3c64xx_spi_cfg_gpio(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
+	s3c_gpio_cfgall_range(base, 3,
+			      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+
 	return 0;
 }
 
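
s3c_gpio_cfgall_range(), used above, collapses three cfgpin() plus three setpull() calls per port into one range operation. A rough sketch of the equivalent loop, assuming the helper simply walks consecutive pins (the real implementation lives in the Samsung plat GPIO code):

#include <plat/gpio-cfg.h>

/* Approximate expansion of s3c_gpio_cfgall_range(base, nr, cfg, pull). */
static int example_cfgall_range(unsigned int base, unsigned int nr,
				unsigned int cfg, s3c_gpio_pull_t pull)
{
	unsigned int pin;

	for (pin = base; pin < base + nr; pin++) {
		s3c_gpio_cfgpin(pin, cfg);	/* e.g. S3C_GPIO_SFN(2) */
		s3c_gpio_setpull(pin, pull);	/* e.g. S3C_GPIO_PULL_UP */
	}
	return 0;
}
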
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h
deleted file mode 100644
index 34212e1a7e81..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-a.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank A register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPACON (S3C64XX_GPA_BASE + 0x00)
16#define S3C64XX_GPADAT (S3C64XX_GPA_BASE + 0x04)
17#define S3C64XX_GPAPUD (S3C64XX_GPA_BASE + 0x08)
18#define S3C64XX_GPACONSLP (S3C64XX_GPA_BASE + 0x0c)
19#define S3C64XX_GPAPUDSLP (S3C64XX_GPA_BASE + 0x10)
20
21#define S3C64XX_GPA_CONMASK(__gpio) (0xf << ((__gpio) * 4))
22#define S3C64XX_GPA_INPUT(__gpio) (0x0 << ((__gpio) * 4))
23#define S3C64XX_GPA_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
24
25#define S3C64XX_GPA0_UART_RXD0 (0x02 << 0)
26#define S3C64XX_GPA0_EINT_G1_0 (0x07 << 0)
27
28#define S3C64XX_GPA1_UART_TXD0 (0x02 << 4)
29#define S3C64XX_GPA1_EINT_G1_1 (0x07 << 4)
30
31#define S3C64XX_GPA2_UART_nCTS0 (0x02 << 8)
32#define S3C64XX_GPA2_EINT_G1_2 (0x07 << 8)
33
34#define S3C64XX_GPA3_UART_nRTS0 (0x02 << 12)
35#define S3C64XX_GPA3_EINT_G1_3 (0x07 << 12)
36
37#define S3C64XX_GPA4_UART_RXD1 (0x02 << 16)
38#define S3C64XX_GPA4_EINT_G1_4 (0x07 << 16)
39
40#define S3C64XX_GPA5_UART_TXD1 (0x02 << 20)
41#define S3C64XX_GPA5_EINT_G1_5 (0x07 << 20)
42
43#define S3C64XX_GPA6_UART_nCTS1 (0x02 << 24)
44#define S3C64XX_GPA6_EINT_G1_6 (0x07 << 24)
45
46#define S3C64XX_GPA7_UART_nRTS1 (0x02 << 28)
47#define S3C64XX_GPA7_EINT_G1_7 (0x07 << 28)
48
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h
deleted file mode 100644
index 7232c037e642..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-b.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank B register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPBCON (S3C64XX_GPB_BASE + 0x00)
16#define S3C64XX_GPBDAT (S3C64XX_GPB_BASE + 0x04)
17#define S3C64XX_GPBPUD (S3C64XX_GPB_BASE + 0x08)
18#define S3C64XX_GPBCONSLP (S3C64XX_GPB_BASE + 0x0c)
19#define S3C64XX_GPBPUDSLP (S3C64XX_GPB_BASE + 0x10)
20
21#define S3C64XX_GPB_CONMASK(__gpio) (0xf << ((__gpio) * 4))
22#define S3C64XX_GPB_INPUT(__gpio) (0x0 << ((__gpio) * 4))
23#define S3C64XX_GPB_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
24
25#define S3C64XX_GPB0_UART_RXD2 (0x02 << 0)
26#define S3C64XX_GPB0_EXTDMA_REQ (0x03 << 0)
27#define S3C64XX_GPB0_IrDA_RXD (0x04 << 0)
28#define S3C64XX_GPB0_ADDR_CF0 (0x05 << 0)
29#define S3C64XX_GPB0_EINT_G1_8 (0x07 << 0)
30
31#define S3C64XX_GPB1_UART_TXD2 (0x02 << 4)
32#define S3C64XX_GPB1_EXTDMA_ACK (0x03 << 4)
33#define S3C64XX_GPB1_IrDA_TXD (0x04 << 4)
34#define S3C64XX_GPB1_ADDR_CF1 (0x05 << 4)
35#define S3C64XX_GPB1_EINT_G1_9 (0x07 << 4)
36
37#define S3C64XX_GPB2_UART_RXD3 (0x02 << 8)
38#define S3C64XX_GPB2_IrDA_RXD (0x03 << 8)
39#define S3C64XX_GPB2_EXTDMA_REQ (0x04 << 8)
40#define S3C64XX_GPB2_ADDR_CF2 (0x05 << 8)
41#define S3C64XX_GPB2_I2C_SCL1 (0x06 << 8)
42#define S3C64XX_GPB2_EINT_G1_10 (0x07 << 8)
43
44#define S3C64XX_GPB3_UART_TXD3 (0x02 << 12)
45#define S3C64XX_GPB3_IrDA_TXD (0x03 << 12)
46#define S3C64XX_GPB3_EXTDMA_ACK (0x04 << 12)
47#define S3C64XX_GPB3_I2C_SDA1 (0x06 << 12)
48#define S3C64XX_GPB3_EINT_G1_11 (0x07 << 12)
49
50#define S3C64XX_GPB4_IrDA_SDBW (0x02 << 16)
51#define S3C64XX_GPB4_CAM_FIELD (0x03 << 16)
52#define S3C64XX_GPB4_CF_DATA_DIR (0x04 << 16)
53#define S3C64XX_GPB4_EINT_G1_12 (0x07 << 16)
54
55#define S3C64XX_GPB5_I2C_SCL0 (0x02 << 20)
56#define S3C64XX_GPB5_EINT_G1_13 (0x07 << 20)
57
58#define S3C64XX_GPB6_I2C_SDA0 (0x02 << 24)
59#define S3C64XX_GPB6_EINT_G1_14 (0x07 << 24)
60
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h
deleted file mode 100644
index db189ab1639a..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-c.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank C register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPCCON (S3C64XX_GPC_BASE + 0x00)
16#define S3C64XX_GPCDAT (S3C64XX_GPC_BASE + 0x04)
17#define S3C64XX_GPCPUD (S3C64XX_GPC_BASE + 0x08)
18#define S3C64XX_GPCCONSLP (S3C64XX_GPC_BASE + 0x0c)
19#define S3C64XX_GPCPUDSLP (S3C64XX_GPC_BASE + 0x10)
20
21#define S3C64XX_GPC_CONMASK(__gpio) (0xf << ((__gpio) * 4))
22#define S3C64XX_GPC_INPUT(__gpio) (0x0 << ((__gpio) * 4))
23#define S3C64XX_GPC_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
24
25#define S3C64XX_GPC0_SPI_MISO0 (0x02 << 0)
26#define S3C64XX_GPC0_EINT_G2_0 (0x07 << 0)
27
28#define S3C64XX_GPC1_SPI_CLKO (0x02 << 4)
29#define S3C64XX_GPC1_EINT_G2_1 (0x07 << 4)
30
31#define S3C64XX_GPC2_SPI_MOSIO (0x02 << 8)
32#define S3C64XX_GPC2_EINT_G2_2 (0x07 << 8)
33
34#define S3C64XX_GPC3_SPI_nCSO (0x02 << 12)
35#define S3C64XX_GPC3_EINT_G2_3 (0x07 << 12)
36
37#define S3C64XX_GPC4_SPI_MISO1 (0x02 << 16)
38#define S3C64XX_GPC4_MMC2_CMD (0x03 << 16)
39#define S3C64XX_GPC4_I2S_V40_DO0 (0x05 << 16)
40#define S3C64XX_GPC4_EINT_G2_4 (0x07 << 16)
41
42#define S3C64XX_GPC5_SPI_CLK1 (0x02 << 20)
43#define S3C64XX_GPC5_MMC2_CLK (0x03 << 20)
44#define S3C64XX_GPC5_I2S_V40_DO1 (0x05 << 20)
45#define S3C64XX_GPC5_EINT_G2_5 (0x07 << 20)
46
47#define S3C64XX_GPC6_SPI_MOSI1 (0x02 << 24)
48#define S3C64XX_GPC6_EINT_G2_6 (0x07 << 24)
49
50#define S3C64XX_GPC7_SPI_nCS1 (0x02 << 28)
51#define S3C64XX_GPC7_I2S_V40_DO2 (0x05 << 28)
52#define S3C64XX_GPC7_EINT_G2_7 (0x07 << 28)
53
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h
deleted file mode 100644
index 1a01cee7aca3..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-d.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank D register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPDCON (S3C64XX_GPD_BASE + 0x00)
16#define S3C64XX_GPDDAT (S3C64XX_GPD_BASE + 0x04)
17#define S3C64XX_GPDPUD (S3C64XX_GPD_BASE + 0x08)
18#define S3C64XX_GPDCONSLP (S3C64XX_GPD_BASE + 0x0c)
19#define S3C64XX_GPDPUDSLP (S3C64XX_GPD_BASE + 0x10)
20
21#define S3C64XX_GPD_CONMASK(__gpio) (0xf << ((__gpio) * 4))
22#define S3C64XX_GPD_INPUT(__gpio) (0x0 << ((__gpio) * 4))
23#define S3C64XX_GPD_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
24
25#define S3C64XX_GPD0_PCM0_SCLK (0x02 << 0)
26#define S3C64XX_GPD0_I2S0_CLK (0x03 << 0)
27#define S3C64XX_GPD0_AC97_BITCLK (0x04 << 0)
28#define S3C64XX_GPD0_EINT_G3_0 (0x07 << 0)
29
30#define S3C64XX_GPD1_PCM0_EXTCLK (0x02 << 4)
31#define S3C64XX_GPD1_I2S0_CDCLK (0x03 << 4)
32#define S3C64XX_GPD1_AC97_nRESET (0x04 << 4)
33#define S3C64XX_GPD1_EINT_G3_1 (0x07 << 4)
34
35#define S3C64XX_GPD2_PCM0_FSYNC (0x02 << 8)
36#define S3C64XX_GPD2_I2S0_LRCLK (0x03 << 8)
37#define S3C64XX_GPD2_AC97_SYNC (0x04 << 8)
38#define S3C64XX_GPD2_EINT_G3_2 (0x07 << 8)
39
40#define S3C64XX_GPD3_PCM0_SIN (0x02 << 12)
41#define S3C64XX_GPD3_I2S0_DI (0x03 << 12)
42#define S3C64XX_GPD3_AC97_SDI (0x04 << 12)
43#define S3C64XX_GPD3_EINT_G3_3 (0x07 << 12)
44
45#define S3C64XX_GPD4_PCM0_SOUT (0x02 << 16)
46#define S3C64XX_GPD4_I2S0_D0 (0x03 << 16)
47#define S3C64XX_GPD4_AC97_SDO (0x04 << 16)
48#define S3C64XX_GPD4_EINT_G3_4 (0x07 << 16)
49
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h
deleted file mode 100644
index f057adb627dd..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-e.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank E register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPECON (S3C64XX_GPE_BASE + 0x00)
16#define S3C64XX_GPEDAT (S3C64XX_GPE_BASE + 0x04)
17#define S3C64XX_GPEPUD (S3C64XX_GPE_BASE + 0x08)
18#define S3C64XX_GPECONSLP (S3C64XX_GPE_BASE + 0x0c)
19#define S3C64XX_GPEPUDSLP (S3C64XX_GPE_BASE + 0x10)
20
21#define S3C64XX_GPE_CONMASK(__gpio) (0xf << ((__gpio) * 4))
22#define S3C64XX_GPE_INPUT(__gpio) (0x0 << ((__gpio) * 4))
23#define S3C64XX_GPE_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
24
25#define S3C64XX_GPE0_PCM1_SCLK (0x02 << 0)
26#define S3C64XX_GPE0_I2S1_CLK (0x03 << 0)
27#define S3C64XX_GPE0_AC97_BITCLK (0x04 << 0)
28
29#define S3C64XX_GPE1_PCM1_EXTCLK (0x02 << 4)
30#define S3C64XX_GPE1_I2S1_CDCLK (0x03 << 4)
31#define S3C64XX_GPE1_AC97_nRESET (0x04 << 4)
32
33#define S3C64XX_GPE2_PCM1_FSYNC (0x02 << 8)
34#define S3C64XX_GPE2_I2S1_LRCLK (0x03 << 8)
35#define S3C64XX_GPE2_AC97_SYNC (0x04 << 8)
36
37#define S3C64XX_GPE3_PCM1_SIN (0x02 << 12)
38#define S3C64XX_GPE3_I2S1_DI (0x03 << 12)
39#define S3C64XX_GPE3_AC97_SDI (0x04 << 12)
40
41#define S3C64XX_GPE4_PCM1_SOUT (0x02 << 16)
42#define S3C64XX_GPE4_I2S1_D0 (0x03 << 16)
43#define S3C64XX_GPE4_AC97_SDO (0x04 << 16)
44
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h
deleted file mode 100644
index 62ab8f5e7835..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-f.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank F register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPFCON (S3C64XX_GPF_BASE + 0x00)
16#define S3C64XX_GPFDAT (S3C64XX_GPF_BASE + 0x04)
17#define S3C64XX_GPFPUD (S3C64XX_GPF_BASE + 0x08)
18#define S3C64XX_GPFCONSLP (S3C64XX_GPF_BASE + 0x0c)
19#define S3C64XX_GPFPUDSLP (S3C64XX_GPF_BASE + 0x10)
20
21#define S3C64XX_GPF_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
22#define S3C64XX_GPF_INPUT(__gpio) (0x0 << ((__gpio) * 2))
23#define S3C64XX_GPF_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
24
25#define S3C64XX_GPF0_CAMIF_CLK (0x02 << 0)
26#define S3C64XX_GPF0_EINT_G4_0 (0x03 << 0)
27
28#define S3C64XX_GPF1_CAMIF_HREF (0x02 << 2)
29#define S3C64XX_GPF1_EINT_G4_1 (0x03 << 2)
30
31#define S3C64XX_GPF2_CAMIF_PCLK (0x02 << 4)
32#define S3C64XX_GPF2_EINT_G4_2 (0x03 << 4)
33
34#define S3C64XX_GPF3_CAMIF_nRST (0x02 << 6)
35#define S3C64XX_GPF3_EINT_G4_3 (0x03 << 6)
36
37#define S3C64XX_GPF4_CAMIF_VSYNC (0x02 << 8)
38#define S3C64XX_GPF4_EINT_G4_4 (0x03 << 8)
39
40#define S3C64XX_GPF5_CAMIF_YDATA0 (0x02 << 10)
41#define S3C64XX_GPF5_EINT_G4_5 (0x03 << 10)
42
43#define S3C64XX_GPF6_CAMIF_YDATA1 (0x02 << 12)
44#define S3C64XX_GPF6_EINT_G4_6 (0x03 << 12)
45
46#define S3C64XX_GPF7_CAMIF_YDATA2 (0x02 << 14)
47#define S3C64XX_GPF7_EINT_G4_7 (0x03 << 14)
48
49#define S3C64XX_GPF8_CAMIF_YDATA3 (0x02 << 16)
50#define S3C64XX_GPF8_EINT_G4_8 (0x03 << 16)
51
52#define S3C64XX_GPF9_CAMIF_YDATA4 (0x02 << 18)
53#define S3C64XX_GPF9_EINT_G4_9 (0x03 << 18)
54
55#define S3C64XX_GPF10_CAMIF_YDATA5 (0x02 << 20)
56#define S3C64XX_GPF10_EINT_G4_10 (0x03 << 20)
57
58#define S3C64XX_GPF11_CAMIF_YDATA6 (0x02 << 22)
59#define S3C64XX_GPF11_EINT_G4_11 (0x03 << 22)
60
61#define S3C64XX_GPF12_CAMIF_YDATA7 (0x02 << 24)
62#define S3C64XX_GPF12_EINT_G4_12 (0x03 << 24)
63
64#define S3C64XX_GPF13_PWM_ECLK (0x02 << 26)
65#define S3C64XX_GPF13_EINT_G4_13 (0x03 << 26)
66
67#define S3C64XX_GPF14_PWM_TOUT0 (0x02 << 28)
68#define S3C64XX_GPF14_CLKOUT0 (0x03 << 28)
69
70#define S3C64XX_GPF15_PWM_TOUT1 (0x02 << 30)
71
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h
deleted file mode 100644
index b94954af1598..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-g.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank G register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPGCON (S3C64XX_GPG_BASE + 0x00)
16#define S3C64XX_GPGDAT (S3C64XX_GPG_BASE + 0x04)
17#define S3C64XX_GPGPUD (S3C64XX_GPG_BASE + 0x08)
18#define S3C64XX_GPGCONSLP (S3C64XX_GPG_BASE + 0x0c)
19#define S3C64XX_GPGPUDSLP (S3C64XX_GPG_BASE + 0x10)
20
21#define S3C64XX_GPG_CONMASK(__gpio) (0xf << ((__gpio) * 4))
22#define S3C64XX_GPG_INPUT(__gpio) (0x0 << ((__gpio) * 4))
23#define S3C64XX_GPG_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
24
25#define S3C64XX_GPG0_MMC0_CLK (0x02 << 0)
26#define S3C64XX_GPG0_EINT_G5_0 (0x07 << 0)
27
28#define S3C64XX_GPG1_MMC0_CMD (0x02 << 4)
29#define S3C64XX_GPG1_EINT_G5_1 (0x07 << 4)
30
31#define S3C64XX_GPG2_MMC0_DATA0 (0x02 << 8)
32#define S3C64XX_GPG2_EINT_G5_2 (0x07 << 8)
33
34#define S3C64XX_GPG3_MMC0_DATA1 (0x02 << 12)
35#define S3C64XX_GPG3_EINT_G5_3 (0x07 << 12)
36
37#define S3C64XX_GPG4_MMC0_DATA2 (0x02 << 16)
38#define S3C64XX_GPG4_EINT_G5_4 (0x07 << 16)
39
40#define S3C64XX_GPG5_MMC0_DATA3 (0x02 << 20)
41#define S3C64XX_GPG5_EINT_G5_5 (0x07 << 20)
42
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h
deleted file mode 100644
index 5d75aaad865e..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-h.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank H register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPHCON0 (S3C64XX_GPH_BASE + 0x00)
16#define S3C64XX_GPHCON1 (S3C64XX_GPH_BASE + 0x04)
17#define S3C64XX_GPHDAT (S3C64XX_GPH_BASE + 0x08)
18#define S3C64XX_GPHPUD (S3C64XX_GPH_BASE + 0x0c)
19#define S3C64XX_GPHCONSLP (S3C64XX_GPH_BASE + 0x10)
20#define S3C64XX_GPHPUDSLP (S3C64XX_GPH_BASE + 0x14)
21
22#define S3C64XX_GPH_CONMASK(__gpio) (0xf << ((__gpio) * 4))
23#define S3C64XX_GPH_INPUT(__gpio) (0x0 << ((__gpio) * 4))
24#define S3C64XX_GPH_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
25
26#define S3C64XX_GPH0_MMC1_CLK (0x02 << 0)
27#define S3C64XX_GPH0_KP_COL0 (0x04 << 0)
28#define S3C64XX_GPH0_EINT_G6_0 (0x07 << 0)
29
30#define S3C64XX_GPH1_MMC1_CMD (0x02 << 4)
31#define S3C64XX_GPH1_KP_COL1 (0x04 << 4)
32#define S3C64XX_GPH1_EINT_G6_1 (0x07 << 4)
33
34#define S3C64XX_GPH2_MMC1_DATA0 (0x02 << 8)
35#define S3C64XX_GPH2_KP_COL2 (0x04 << 8)
36#define S3C64XX_GPH2_EINT_G6_2 (0x07 << 8)
37
38#define S3C64XX_GPH3_MMC1_DATA1 (0x02 << 12)
39#define S3C64XX_GPH3_KP_COL3 (0x04 << 12)
40#define S3C64XX_GPH3_EINT_G6_3 (0x07 << 12)
41
42#define S3C64XX_GPH4_MMC1_DATA2 (0x02 << 16)
43#define S3C64XX_GPH4_KP_COL4 (0x04 << 16)
44#define S3C64XX_GPH4_EINT_G6_4 (0x07 << 16)
45
46#define S3C64XX_GPH5_MMC1_DATA3 (0x02 << 20)
47#define S3C64XX_GPH5_KP_COL5 (0x04 << 20)
48#define S3C64XX_GPH5_EINT_G6_5 (0x07 << 20)
49
50#define S3C64XX_GPH6_MMC1_DATA4 (0x02 << 24)
51#define S3C64XX_GPH6_MMC2_DATA0 (0x03 << 24)
52#define S3C64XX_GPH6_KP_COL6 (0x04 << 24)
53#define S3C64XX_GPH6_I2S_V40_BCLK (0x05 << 24)
54#define S3C64XX_GPH6_ADDR_CF0 (0x06 << 24)
55#define S3C64XX_GPH6_EINT_G6_6 (0x07 << 24)
56
57#define S3C64XX_GPH7_MMC1_DATA5 (0x02 << 28)
58#define S3C64XX_GPH7_MMC2_DATA1 (0x03 << 28)
59#define S3C64XX_GPH7_KP_COL7 (0x04 << 28)
60#define S3C64XX_GPH7_I2S_V40_CDCLK (0x05 << 28)
61#define S3C64XX_GPH7_ADDR_CF1 (0x06 << 28)
62#define S3C64XX_GPH7_EINT_G6_7 (0x07 << 28)
63
64#define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 0)
65#define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 0)
66#define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 0)
67#define S3C64XX_GPH8_ADDR_CF2 (0x06 << 0)
68#define S3C64XX_GPH8_EINT_G6_8 (0x07 << 0)
69
70#define S3C64XX_GPH9_OUTPUT (0x01 << 4)
71#define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 4)
72#define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 4)
73#define S3C64XX_GPH9_I2S_V40_DI (0x05 << 4)
74#define S3C64XX_GPH9_EINT_G6_9 (0x07 << 4)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h
deleted file mode 100644
index 4ceaa6098bc7..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-i.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank I register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPICON (S3C64XX_GPI_BASE + 0x00)
16#define S3C64XX_GPIDAT (S3C64XX_GPI_BASE + 0x04)
17#define S3C64XX_GPIPUD (S3C64XX_GPI_BASE + 0x08)
18#define S3C64XX_GPICONSLP (S3C64XX_GPI_BASE + 0x0c)
19#define S3C64XX_GPIPUDSLP (S3C64XX_GPI_BASE + 0x10)
20
21#define S3C64XX_GPI_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
22#define S3C64XX_GPI_INPUT(__gpio) (0x0 << ((__gpio) * 2))
23#define S3C64XX_GPI_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
24
25#define S3C64XX_GPI0_VD0 (0x02 << 0)
26#define S3C64XX_GPI1_VD1 (0x02 << 2)
27#define S3C64XX_GPI2_VD2 (0x02 << 4)
28#define S3C64XX_GPI3_VD3 (0x02 << 6)
29#define S3C64XX_GPI4_VD4 (0x02 << 8)
30#define S3C64XX_GPI5_VD5 (0x02 << 10)
31#define S3C64XX_GPI6_VD6 (0x02 << 12)
32#define S3C64XX_GPI7_VD7 (0x02 << 14)
33#define S3C64XX_GPI8_VD8 (0x02 << 16)
34#define S3C64XX_GPI9_VD9 (0x02 << 18)
35#define S3C64XX_GPI10_VD10 (0x02 << 20)
36#define S3C64XX_GPI11_VD11 (0x02 << 22)
37#define S3C64XX_GPI12_VD12 (0x02 << 24)
38#define S3C64XX_GPI13_VD13 (0x02 << 26)
39#define S3C64XX_GPI14_VD14 (0x02 << 28)
40#define S3C64XX_GPI15_VD15 (0x02 << 30)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h
deleted file mode 100644
index 6f25cd079a40..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-j.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank J register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPJCON (S3C64XX_GPJ_BASE + 0x00)
16#define S3C64XX_GPJDAT (S3C64XX_GPJ_BASE + 0x04)
17#define S3C64XX_GPJPUD (S3C64XX_GPJ_BASE + 0x08)
18#define S3C64XX_GPJCONSLP (S3C64XX_GPJ_BASE + 0x0c)
19#define S3C64XX_GPJPUDSLP (S3C64XX_GPJ_BASE + 0x10)
20
21#define S3C64XX_GPJ_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
22#define S3C64XX_GPJ_INPUT(__gpio) (0x0 << ((__gpio) * 2))
23#define S3C64XX_GPJ_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
24
25#define S3C64XX_GPJ0_VD16 (0x02 << 0)
26#define S3C64XX_GPJ1_VD17 (0x02 << 2)
27#define S3C64XX_GPJ2_VD18 (0x02 << 4)
28#define S3C64XX_GPJ3_VD19 (0x02 << 6)
29#define S3C64XX_GPJ4_VD20 (0x02 << 8)
30#define S3C64XX_GPJ5_VD21 (0x02 << 10)
31#define S3C64XX_GPJ6_VD22 (0x02 << 12)
32#define S3C64XX_GPJ7_VD23 (0x02 << 14)
33#define S3C64XX_GPJ8_LCD_HSYNC (0x02 << 16)
34#define S3C64XX_GPJ9_LCD_VSYNC (0x02 << 18)
35#define S3C64XX_GPJ10_LCD_VDEN (0x02 << 20)
36#define S3C64XX_GPJ11_LCD_VCLK (0x02 << 22)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h
deleted file mode 100644
index d0aeda1cd9de..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-n.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank N register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPNCON (S3C64XX_GPN_BASE + 0x00)
16#define S3C64XX_GPNDAT (S3C64XX_GPN_BASE + 0x04)
17#define S3C64XX_GPNPUD (S3C64XX_GPN_BASE + 0x08)
18
19#define S3C64XX_GPN_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
20#define S3C64XX_GPN_INPUT(__gpio) (0x0 << ((__gpio) * 2))
21#define S3C64XX_GPN_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
22
23#define S3C64XX_GPN0_EINT0 (0x02 << 0)
24#define S3C64XX_GPN0_KP_ROW0 (0x03 << 0)
25
26#define S3C64XX_GPN1_EINT1 (0x02 << 2)
27#define S3C64XX_GPN1_KP_ROW1 (0x03 << 2)
28
29#define S3C64XX_GPN2_EINT2 (0x02 << 4)
30#define S3C64XX_GPN2_KP_ROW2 (0x03 << 4)
31
32#define S3C64XX_GPN3_EINT3 (0x02 << 6)
33#define S3C64XX_GPN3_KP_ROW3 (0x03 << 6)
34
35#define S3C64XX_GPN4_EINT4 (0x02 << 8)
36#define S3C64XX_GPN4_KP_ROW4 (0x03 << 8)
37
38#define S3C64XX_GPN5_EINT5 (0x02 << 10)
39#define S3C64XX_GPN5_KP_ROW5 (0x03 << 10)
40
41#define S3C64XX_GPN6_EINT6 (0x02 << 12)
42#define S3C64XX_GPN6_KP_ROW6 (0x03 << 12)
43
44#define S3C64XX_GPN7_EINT7 (0x02 << 14)
45#define S3C64XX_GPN7_KP_ROW7 (0x03 << 14)
46
47#define S3C64XX_GPN8_EINT8 (0x02 << 16)
48#define S3C64XX_GPN9_EINT9 (0x02 << 18)
49#define S3C64XX_GPN10_EINT10 (0x02 << 20)
50#define S3C64XX_GPN11_EINT11 (0x02 << 22)
51#define S3C64XX_GPN12_EINT12 (0x02 << 24)
52#define S3C64XX_GPN13_EINT13 (0x02 << 26)
53#define S3C64XX_GPN14_EINT14 (0x02 << 28)
54#define S3C64XX_GPN15_EINT15 (0x02 << 30)
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h
deleted file mode 100644
index 21868fa102d0..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-o.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank O register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPOCON (S3C64XX_GPO_BASE + 0x00)
16#define S3C64XX_GPODAT (S3C64XX_GPO_BASE + 0x04)
17#define S3C64XX_GPOPUD (S3C64XX_GPO_BASE + 0x08)
18#define S3C64XX_GPOCONSLP (S3C64XX_GPO_BASE + 0x0c)
19#define S3C64XX_GPOPUDSLP (S3C64XX_GPO_BASE + 0x10)
20
21#define S3C64XX_GPO_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
22#define S3C64XX_GPO_INPUT(__gpio) (0x0 << ((__gpio) * 2))
23#define S3C64XX_GPO_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
24
25#define S3C64XX_GPO0_MEM0_nCS2 (0x02 << 0)
26#define S3C64XX_GPO0_EINT_G7_0 (0x03 << 0)
27
28#define S3C64XX_GPO1_MEM0_nCS3 (0x02 << 2)
29#define S3C64XX_GPO1_EINT_G7_1 (0x03 << 2)
30
31#define S3C64XX_GPO2_MEM0_nCS4 (0x02 << 4)
32#define S3C64XX_GPO2_EINT_G7_2 (0x03 << 4)
33
34#define S3C64XX_GPO3_MEM0_nCS5 (0x02 << 6)
35#define S3C64XX_GPO3_EINT_G7_3 (0x03 << 6)
36
37#define S3C64XX_GPO4_EINT_G7_4 (0x03 << 8)
38
39#define S3C64XX_GPO5_EINT_G7_5 (0x03 << 10)
40
41#define S3C64XX_GPO6_MEM0_ADDR6 (0x02 << 12)
42#define S3C64XX_GPO6_EINT_G7_6 (0x03 << 12)
43
44#define S3C64XX_GPO7_MEM0_ADDR7 (0x02 << 14)
45#define S3C64XX_GPO7_EINT_G7_7 (0x03 << 14)
46
47#define S3C64XX_GPO8_MEM0_ADDR8 (0x02 << 16)
48#define S3C64XX_GPO8_EINT_G7_8 (0x03 << 16)
49
50#define S3C64XX_GPO9_MEM0_ADDR9 (0x02 << 18)
51#define S3C64XX_GPO9_EINT_G7_9 (0x03 << 18)
52
53#define S3C64XX_GPO10_MEM0_ADDR10 (0x02 << 20)
54#define S3C64XX_GPO10_EINT_G7_10 (0x03 << 20)
55
56#define S3C64XX_GPO11_MEM0_ADDR11 (0x02 << 22)
57#define S3C64XX_GPO11_EINT_G7_11 (0x03 << 22)
58
59#define S3C64XX_GPO12_MEM0_ADDR12 (0x02 << 24)
60#define S3C64XX_GPO12_EINT_G7_12 (0x03 << 24)
61
62#define S3C64XX_GPO13_MEM0_ADDR13 (0x02 << 26)
63#define S3C64XX_GPO13_EINT_G7_13 (0x03 << 26)
64
65#define S3C64XX_GPO14_MEM0_ADDR14 (0x02 << 28)
66#define S3C64XX_GPO14_EINT_G7_14 (0x03 << 28)
67
68#define S3C64XX_GPO15_MEM0_ADDR15 (0x02 << 30)
69#define S3C64XX_GPO15_EINT_G7_15 (0x03 << 30)
70
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h
deleted file mode 100644
index 46bcfb63b8de..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h
+++ /dev/null
@@ -1,69 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-p.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank P register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPPCON (S3C64XX_GPP_BASE + 0x00)
16#define S3C64XX_GPPDAT (S3C64XX_GPP_BASE + 0x04)
17#define S3C64XX_GPPPUD (S3C64XX_GPP_BASE + 0x08)
18#define S3C64XX_GPPCONSLP (S3C64XX_GPP_BASE + 0x0c)
19#define S3C64XX_GPPPUDSLP (S3C64XX_GPP_BASE + 0x10)
20
21#define S3C64XX_GPP_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
22#define S3C64XX_GPP_INPUT(__gpio) (0x0 << ((__gpio) * 2))
23#define S3C64XX_GPP_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
24
25#define S3C64XX_GPP0_MEM0_ADDRV (0x02 << 0)
26#define S3C64XX_GPP0_EINT_G8_0 (0x03 << 0)
27
28#define S3C64XX_GPP1_MEM0_SMCLK (0x02 << 2)
29#define S3C64XX_GPP1_EINT_G8_1 (0x03 << 2)
30
31#define S3C64XX_GPP2_MEM0_nWAIT (0x02 << 4)
32#define S3C64XX_GPP2_EINT_G8_2 (0x03 << 4)
33
34#define S3C64XX_GPP3_MEM0_RDY0_ALE (0x02 << 6)
35#define S3C64XX_GPP3_EINT_G8_3 (0x03 << 6)
36
37#define S3C64XX_GPP4_MEM0_RDY1_CLE (0x02 << 8)
38#define S3C64XX_GPP4_EINT_G8_4 (0x03 << 8)
39
40#define S3C64XX_GPP5_MEM0_INTsm0_FWE (0x02 << 10)
41#define S3C64XX_GPP5_EINT_G8_5 (0x03 << 10)
42
43#define S3C64XX_GPP6_MEM0_(null) (0x02 << 12)
44#define S3C64XX_GPP6_EINT_G8_6 (0x03 << 12)
45
46#define S3C64XX_GPP7_MEM0_INTsm1_FRE (0x02 << 14)
47#define S3C64XX_GPP7_EINT_G8_7 (0x03 << 14)
48
49#define S3C64XX_GPP8_MEM0_RPn_RnB (0x02 << 16)
50#define S3C64XX_GPP8_EINT_G8_8 (0x03 << 16)
51
52#define S3C64XX_GPP9_MEM0_ATA_RESET (0x02 << 18)
53#define S3C64XX_GPP9_EINT_G8_9 (0x03 << 18)
54
55#define S3C64XX_GPP10_MEM0_ATA_INPACK (0x02 << 20)
56#define S3C64XX_GPP10_EINT_G8_10 (0x03 << 20)
57
58#define S3C64XX_GPP11_MEM0_ATA_REG (0x02 << 22)
59#define S3C64XX_GPP11_EINT_G8_11 (0x03 << 22)
60
61#define S3C64XX_GPP12_MEM0_ATA_WE (0x02 << 24)
62#define S3C64XX_GPP12_EINT_G8_12 (0x03 << 24)
63
64#define S3C64XX_GPP13_MEM0_ATA_OE (0x02 << 26)
65#define S3C64XX_GPP13_EINT_G8_13 (0x03 << 26)
66
67#define S3C64XX_GPP14_MEM0_ATA_CD (0x02 << 28)
68#define S3C64XX_GPP14_EINT_G8_14 (0x03 << 28)
69
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h b/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h
deleted file mode 100644
index 1712223487b0..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/* linux/arch/arm/mach-s3c64xx/include/mach/gpio-bank-q.h
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * GPIO Bank Q register and configuration definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#define S3C64XX_GPQCON (S3C64XX_GPQ_BASE + 0x00)
16#define S3C64XX_GPQDAT (S3C64XX_GPQ_BASE + 0x04)
17#define S3C64XX_GPQPUD (S3C64XX_GPQ_BASE + 0x08)
18#define S3C64XX_GPQCONSLP (S3C64XX_GPQ_BASE + 0x0c)
19#define S3C64XX_GPQPUDSLP (S3C64XX_GPQ_BASE + 0x10)
20
21#define S3C64XX_GPQ_CONMASK(__gpio) (0x3 << ((__gpio) * 2))
22#define S3C64XX_GPQ_INPUT(__gpio) (0x0 << ((__gpio) * 2))
23#define S3C64XX_GPQ_OUTPUT(__gpio) (0x1 << ((__gpio) * 2))
24
25#define S3C64XX_GPQ0_MEM0_ADDR18_RAS (0x02 << 0)
26#define S3C64XX_GPQ0_EINT_G9_0 (0x03 << 0)
27
28#define S3C64XX_GPQ1_MEM0_ADDR19_CAS (0x02 << 2)
29#define S3C64XX_GPQ1_EINT_G9_1 (0x03 << 2)
30
31#define S3C64XX_GPQ2_EINT_G9_2 (0x03 << 4)
32
33#define S3C64XX_GPQ3_EINT_G9_3 (0x03 << 6)
34
35#define S3C64XX_GPQ4_EINT_G9_4 (0x03 << 8)
36
37#define S3C64XX_GPQ5_EINT_G9_5 (0x03 << 10)
38
39#define S3C64XX_GPQ6_EINT_G9_6 (0x03 << 12)
40
41#define S3C64XX_GPQ7_MEM0_ADDR17_WENDMC (0x02 << 14)
42#define S3C64XX_GPQ7_EINT_G9_7 (0x03 << 14)
43
44#define S3C64XX_GPQ8_MEM0_ADDR16_APDMC (0x02 << 16)
45#define S3C64XX_GPQ8_EINT_G9_8 (0x03 << 16)
46
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 686a4f270b12..2c0353a80906 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -50,7 +50,6 @@
 #include <mach/hardware.h>
 #include <mach/regs-fb.h>
 #include <mach/map.h>
-#include <mach/gpio-bank-f.h>
 
 #include <asm/irq.h>
 #include <asm/mach-types.h>
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 79412f735a8d..bc1c470b7de6 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -30,26 +30,18 @@
 #include <mach/regs-gpio-memport.h>
 
 #ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
-#include <mach/gpio-bank-n.h>
-
 void s3c_pm_debug_smdkled(u32 set, u32 clear)
 {
 	unsigned long flags;
-	u32 reg;
+	int i;
 
 	local_irq_save(flags);
-	reg = __raw_readl(S3C64XX_GPNCON);
-	reg &= ~(S3C64XX_GPN_CONMASK(12) | S3C64XX_GPN_CONMASK(13) |
-		 S3C64XX_GPN_CONMASK(14) | S3C64XX_GPN_CONMASK(15));
-	reg |= S3C64XX_GPN_OUTPUT(12) | S3C64XX_GPN_OUTPUT(13) |
-	       S3C64XX_GPN_OUTPUT(14) | S3C64XX_GPN_OUTPUT(15);
-	__raw_writel(reg, S3C64XX_GPNCON);
-
-	reg = __raw_readl(S3C64XX_GPNDAT);
-	reg &= ~(clear << 12);
-	reg |= set << 12;
-	__raw_writel(reg, S3C64XX_GPNDAT);
-
+	for (i = 0; i < 4; i++) {
+		if (clear & (1 << i))
+			gpio_set_value(S3C64XX_GPN(12 + i), 0);
+		if (set & (1 << i))
+			gpio_set_value(S3C64XX_GPN(12 + i), 1);
+	}
 	local_irq_restore(flags);
 }
 #endif
@@ -187,6 +179,18 @@ static int s3c64xx_pm_init(void)
 	pm_cpu_prep = s3c64xx_pm_prepare;
 	pm_cpu_sleep = s3c64xx_cpu_suspend;
 	pm_uart_udivslot = 1;
+
+#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
+	gpio_request(S3C64XX_GPN(12), "DEBUG_LED0");
+	gpio_request(S3C64XX_GPN(13), "DEBUG_LED1");
+	gpio_request(S3C64XX_GPN(14), "DEBUG_LED2");
+	gpio_request(S3C64XX_GPN(15), "DEBUG_LED3");
+	gpio_direction_output(S3C64XX_GPN(12), 0);
+	gpio_direction_output(S3C64XX_GPN(13), 0);
+	gpio_direction_output(S3C64XX_GPN(14), 0);
+	gpio_direction_output(S3C64XX_GPN(15), 0);
+#endif
+
 	return 0;
 }
 
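
With the pm.c change above, the debug LEDs are claimed and configured as outputs once in s3c64xx_pm_init(), so the runtime path only toggles data bits through gpiolib instead of rewriting GPNCON/GPNDAT by hand. A hedged usage sketch, assuming the S3C64XX_GPN() macro is reachable via the machine's gpio header as in the hunk:

#include <linux/gpio.h>	/* gpio_set_value(); S3C64XX_GPN() comes in via mach/gpio.h */

/* Drive debug LED 'led' (0..3) on bank N pins 12..15, as the new code does. */
static void example_set_debug_led(int led, int on)
{
	gpio_set_value(S3C64XX_GPN(12 + led), on);
}
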
diff --git a/arch/arm/mach-s3c64xx/setup-i2c0.c b/arch/arm/mach-s3c64xx/setup-i2c0.c
index 406192a43c6e..241af94a9e70 100644
--- a/arch/arm/mach-s3c64xx/setup-i2c0.c
+++ b/arch/arm/mach-s3c64xx/setup-i2c0.c
@@ -18,14 +18,11 @@
 
 struct platform_device; /* don't need the contents */
 
-#include <mach/gpio-bank-b.h>
 #include <plat/iic.h>
 #include <plat/gpio-cfg.h>
 
 void s3c_i2c0_cfg_gpio(struct platform_device *dev)
 {
-	s3c_gpio_cfgpin(S3C64XX_GPB(5), S3C64XX_GPB5_I2C_SCL0);
-	s3c_gpio_cfgpin(S3C64XX_GPB(6), S3C64XX_GPB6_I2C_SDA0);
-	s3c_gpio_setpull(S3C64XX_GPB(5), S3C_GPIO_PULL_UP);
-	s3c_gpio_setpull(S3C64XX_GPB(6), S3C_GPIO_PULL_UP);
+	s3c_gpio_cfgall_range(S3C64XX_GPB(5), 2,
+			      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
 }
diff --git a/arch/arm/mach-s3c64xx/setup-i2c1.c b/arch/arm/mach-s3c64xx/setup-i2c1.c
index 1ee62c97cd7f..3d13a961986d 100644
--- a/arch/arm/mach-s3c64xx/setup-i2c1.c
+++ b/arch/arm/mach-s3c64xx/setup-i2c1.c
@@ -18,14 +18,11 @@
 
 struct platform_device; /* don't need the contents */
 
-#include <mach/gpio-bank-b.h>
 #include <plat/iic.h>
 #include <plat/gpio-cfg.h>
 
 void s3c_i2c1_cfg_gpio(struct platform_device *dev)
 {
-	s3c_gpio_cfgpin(S3C64XX_GPB(2), S3C64XX_GPB2_I2C_SCL1);
-	s3c_gpio_cfgpin(S3C64XX_GPB(3), S3C64XX_GPB3_I2C_SDA1);
-	s3c_gpio_setpull(S3C64XX_GPB(2), S3C_GPIO_PULL_UP);
-	s3c_gpio_setpull(S3C64XX_GPB(3), S3C_GPIO_PULL_UP);
+	s3c_gpio_cfgall_range(S3C64XX_GPB(2), 2,
+			      S3C_GPIO_SFN(6), S3C_GPIO_PULL_UP);
 }
diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S
index afe5a762f46e..1f87732b2320 100644
--- a/arch/arm/mach-s3c64xx/sleep.S
+++ b/arch/arm/mach-s3c64xx/sleep.S
@@ -20,7 +20,6 @@
 #define S3C64XX_VA_GPIO (0x0)
 
 #include <mach/regs-gpio.h>
-#include <mach/gpio-bank-n.h>
 
 #define LL_UART (S3C_PA_UART + (0x400 * CONFIG_S3C_LOWLEVEL_UART_PORT))
 
@@ -68,6 +67,13 @@ ENTRY(s3c_cpu_resume)
 	ldr	r2, =LL_UART		/* for debug */
 
 #ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
+
+#define S3C64XX_GPNCON			(S3C64XX_GPN_BASE + 0x00)
+#define S3C64XX_GPNDAT			(S3C64XX_GPN_BASE + 0x04)
+
+#define S3C64XX_GPN_CONMASK(__gpio)	(0x3 << ((__gpio) * 2))
+#define S3C64XX_GPN_OUTPUT(__gpio)	(0x1 << ((__gpio) * 2))
+
 	/* Initialise the GPIO state if we are debugging via the SMDK LEDs,
 	 * as the uboot version supplied resets these to inputs during the
 	 * resume checks.
diff --git a/arch/arm/mach-s5p6442/Kconfig b/arch/arm/mach-s5p6442/Kconfig
deleted file mode 100644
index 33569e4007c4..000000000000
--- a/arch/arm/mach-s5p6442/Kconfig
+++ /dev/null
@@ -1,25 +0,0 @@
1# arch/arm/mach-s5p6442/Kconfig
2#
3# Copyright (c) 2010 Samsung Electronics Co., Ltd.
4# http://www.samsung.com/
5#
6# Licensed under GPLv2
7
8# Configuration options for the S5P6442
9
10if ARCH_S5P6442
11
12config CPU_S5P6442
13 bool
14 select S3C_PL330_DMA
15 help
16 Enable S5P6442 CPU support
17
18config MACH_SMDK6442
19 bool "SMDK6442"
20 select CPU_S5P6442
21 select S3C_DEV_WDT
22 help
23 Machine support for Samsung SMDK6442
24
25endif
diff --git a/arch/arm/mach-s5p6442/Makefile b/arch/arm/mach-s5p6442/Makefile
deleted file mode 100644
index 90a3d8373416..000000000000
--- a/arch/arm/mach-s5p6442/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
1# arch/arm/mach-s5p6442/Makefile
2#
3# Copyright (c) 2010 Samsung Electronics Co., Ltd.
4# http://www.samsung.com/
5#
6# Licensed under GPLv2
7
8obj-y :=
9obj-m :=
10obj-n :=
11obj- :=
12
13# Core support for S5P6442 system
14
15obj-$(CONFIG_CPU_S5P6442) += cpu.o init.o clock.o dma.o
16obj-$(CONFIG_CPU_S5P6442) += setup-i2c0.o
17
18# machine support
19
20obj-$(CONFIG_MACH_SMDK6442) += mach-smdk6442.o
21
22# device support
23obj-y += dev-audio.o
24obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
diff --git a/arch/arm/mach-s5p6442/Makefile.boot b/arch/arm/mach-s5p6442/Makefile.boot
deleted file mode 100644
index ff90aa13bd67..000000000000
--- a/arch/arm/mach-s5p6442/Makefile.boot
+++ /dev/null
@@ -1,2 +0,0 @@
1 zreladdr-y := 0x20008000
2params_phys-y := 0x20000100
diff --git a/arch/arm/mach-s5p6442/clock.c b/arch/arm/mach-s5p6442/clock.c
deleted file mode 100644
index fbbc7bede685..000000000000
--- a/arch/arm/mach-s5p6442/clock.c
+++ /dev/null
@@ -1,420 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/clock.c
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5P6442 - Clock support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/list.h>
17#include <linux/err.h>
18#include <linux/clk.h>
19#include <linux/io.h>
20
21#include <mach/map.h>
22
23#include <plat/cpu-freq.h>
24#include <mach/regs-clock.h>
25#include <plat/clock.h>
26#include <plat/cpu.h>
27#include <plat/pll.h>
28#include <plat/s5p-clock.h>
29#include <plat/clock-clksrc.h>
30#include <plat/s5p6442.h>
31
32static struct clksrc_clk clk_mout_apll = {
33 .clk = {
34 .name = "mout_apll",
35 .id = -1,
36 },
37 .sources = &clk_src_apll,
38 .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
39};
40
41static struct clksrc_clk clk_mout_mpll = {
42 .clk = {
43 .name = "mout_mpll",
44 .id = -1,
45 },
46 .sources = &clk_src_mpll,
47 .reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 },
48};
49
50static struct clksrc_clk clk_mout_epll = {
51 .clk = {
52 .name = "mout_epll",
53 .id = -1,
54 },
55 .sources = &clk_src_epll,
56 .reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 },
57};
58
59/* Possible clock sources for ARM Mux */
60static struct clk *clk_src_arm_list[] = {
61 [1] = &clk_mout_apll.clk,
62 [2] = &clk_mout_mpll.clk,
63};
64
65static struct clksrc_sources clk_src_arm = {
66 .sources = clk_src_arm_list,
67 .nr_sources = ARRAY_SIZE(clk_src_arm_list),
68};
69
70static struct clksrc_clk clk_mout_arm = {
71 .clk = {
72 .name = "mout_arm",
73 .id = -1,
74 },
75 .sources = &clk_src_arm,
76 .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 16, .size = 3 },
77};
78
79static struct clk clk_dout_a2m = {
80 .name = "dout_a2m",
81 .id = -1,
82 .parent = &clk_mout_apll.clk,
83};
84
85/* Possible clock sources for D0 Mux */
86static struct clk *clk_src_d0_list[] = {
87 [1] = &clk_mout_mpll.clk,
88 [2] = &clk_dout_a2m,
89};
90
91static struct clksrc_sources clk_src_d0 = {
92 .sources = clk_src_d0_list,
93 .nr_sources = ARRAY_SIZE(clk_src_d0_list),
94};
95
96static struct clksrc_clk clk_mout_d0 = {
97 .clk = {
98 .name = "mout_d0",
99 .id = -1,
100 },
101 .sources = &clk_src_d0,
102 .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 20, .size = 3 },
103};
104
105static struct clk clk_dout_apll = {
106 .name = "dout_apll",
107 .id = -1,
108 .parent = &clk_mout_arm.clk,
109};
110
111/* Possible clock sources for D0SYNC Mux */
112static struct clk *clk_src_d0sync_list[] = {
113 [1] = &clk_mout_d0.clk,
114 [2] = &clk_dout_apll,
115};
116
117static struct clksrc_sources clk_src_d0sync = {
118 .sources = clk_src_d0sync_list,
119 .nr_sources = ARRAY_SIZE(clk_src_d0sync_list),
120};
121
122static struct clksrc_clk clk_mout_d0sync = {
123 .clk = {
124 .name = "mout_d0sync",
125 .id = -1,
126 },
127 .sources = &clk_src_d0sync,
128 .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 28, .size = 3 },
129};
130
131/* Possible clock sources for D1 Mux */
132static struct clk *clk_src_d1_list[] = {
133 [1] = &clk_mout_mpll.clk,
134 [2] = &clk_dout_a2m,
135};
136
137static struct clksrc_sources clk_src_d1 = {
138 .sources = clk_src_d1_list,
139 .nr_sources = ARRAY_SIZE(clk_src_d1_list),
140};
141
142static struct clksrc_clk clk_mout_d1 = {
143 .clk = {
144 .name = "mout_d1",
145 .id = -1,
146 },
147 .sources = &clk_src_d1,
148 .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 24, .size = 3 },
149};
150
151/* Possible clock sources for D1SYNC Mux */
152static struct clk *clk_src_d1sync_list[] = {
153 [1] = &clk_mout_d1.clk,
154 [2] = &clk_dout_apll,
155};
156
157static struct clksrc_sources clk_src_d1sync = {
158 .sources = clk_src_d1sync_list,
159 .nr_sources = ARRAY_SIZE(clk_src_d1sync_list),
160};
161
162static struct clksrc_clk clk_mout_d1sync = {
163 .clk = {
164 .name = "mout_d1sync",
165 .id = -1,
166 },
167 .sources = &clk_src_d1sync,
168 .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 24, .size = 3 },
169};
170
171static struct clk clk_hclkd0 = {
172 .name = "hclkd0",
173 .id = -1,
174 .parent = &clk_mout_d0sync.clk,
175};
176
177static struct clk clk_hclkd1 = {
178 .name = "hclkd1",
179 .id = -1,
180 .parent = &clk_mout_d1sync.clk,
181};
182
183static struct clk clk_pclkd0 = {
184 .name = "pclkd0",
185 .id = -1,
186 .parent = &clk_hclkd0,
187};
188
189static struct clk clk_pclkd1 = {
190 .name = "pclkd1",
191 .id = -1,
192 .parent = &clk_hclkd1,
193};
194
195int s5p6442_clk_ip0_ctrl(struct clk *clk, int enable)
196{
197 return s5p_gatectrl(S5P_CLKGATE_IP0, clk, enable);
198}
199
200int s5p6442_clk_ip3_ctrl(struct clk *clk, int enable)
201{
202 return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
203}
204
205static struct clksrc_clk clksrcs[] = {
206 {
207 .clk = {
208 .name = "dout_a2m",
209 .id = -1,
210 .parent = &clk_mout_apll.clk,
211 },
212 .sources = &clk_src_apll,
213 .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
214 .reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 },
215 }, {
216 .clk = {
217 .name = "dout_apll",
218 .id = -1,
219 .parent = &clk_mout_arm.clk,
220 },
221 .sources = &clk_src_arm,
222 .reg_src = { .reg = S5P_CLK_MUX_STAT0, .shift = 16, .size = 3 },
223 .reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 3 },
224 }, {
225 .clk = {
226 .name = "hclkd1",
227 .id = -1,
228 .parent = &clk_mout_d1sync.clk,
229 },
230 .sources = &clk_src_d1sync,
231 .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 24, .size = 3 },
232 .reg_div = { .reg = S5P_CLK_DIV0, .shift = 24, .size = 4 },
233 }, {
234 .clk = {
235 .name = "hclkd0",
236 .id = -1,
237 .parent = &clk_mout_d0sync.clk,
238 },
239 .sources = &clk_src_d0sync,
240 .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 28, .size = 3 },
241 .reg_div = { .reg = S5P_CLK_DIV0, .shift = 16, .size = 4 },
242 }, {
243 .clk = {
244 .name = "pclkd0",
245 .id = -1,
246 .parent = &clk_hclkd0,
247 },
248 .sources = &clk_src_d0sync,
249 .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 28, .size = 3 },
250 .reg_div = { .reg = S5P_CLK_DIV0, .shift = 20, .size = 3 },
251 }, {
252 .clk = {
253 .name = "pclkd1",
254 .id = -1,
255 .parent = &clk_hclkd1,
256 },
257 .sources = &clk_src_d1sync,
258 .reg_src = { .reg = S5P_CLK_MUX_STAT1, .shift = 24, .size = 3 },
259 .reg_div = { .reg = S5P_CLK_DIV0, .shift = 28, .size = 3 },
260 }
261};
262
263/* Clock initialisation code */
264static struct clksrc_clk *init_parents[] = {
265 &clk_mout_apll,
266 &clk_mout_mpll,
267 &clk_mout_epll,
268 &clk_mout_arm,
269 &clk_mout_d0,
270 &clk_mout_d0sync,
271 &clk_mout_d1,
272 &clk_mout_d1sync,
273};
274
275void __init_or_cpufreq s5p6442_setup_clocks(void)
276{
277 struct clk *pclkd0_clk;
278 struct clk *pclkd1_clk;
279
280 unsigned long xtal;
281 unsigned long arm;
282 unsigned long hclkd0 = 0;
283 unsigned long hclkd1 = 0;
284 unsigned long pclkd0 = 0;
285 unsigned long pclkd1 = 0;
286
287 unsigned long apll;
288 unsigned long mpll;
289 unsigned long epll;
290 unsigned int ptr;
291
292 printk(KERN_DEBUG "%s: registering clocks\n", __func__);
293
294 xtal = clk_get_rate(&clk_xtal);
295
296 printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal);
297
298 apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508);
299 mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON), pll_4502);
300 epll = s5p_get_pll45xx(xtal, __raw_readl(S5P_EPLL_CON), pll_4500);
301
302 printk(KERN_INFO "S5P6442: PLL settings, A=%ld, M=%ld, E=%ld",
303 apll, mpll, epll);
304
305 clk_fout_apll.rate = apll;
306 clk_fout_mpll.rate = mpll;
307 clk_fout_epll.rate = epll;
308
309 for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++)
310 s3c_set_clksrc(init_parents[ptr], true);
311
312 for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
313 s3c_set_clksrc(&clksrcs[ptr], true);
314
315 arm = clk_get_rate(&clk_dout_apll);
316 hclkd0 = clk_get_rate(&clk_hclkd0);
317 hclkd1 = clk_get_rate(&clk_hclkd1);
318
319 pclkd0_clk = clk_get(NULL, "pclkd0");
320 BUG_ON(IS_ERR(pclkd0_clk));
321
322 pclkd0 = clk_get_rate(pclkd0_clk);
323 clk_put(pclkd0_clk);
324
325 pclkd1_clk = clk_get(NULL, "pclkd1");
326 BUG_ON(IS_ERR(pclkd1_clk));
327
328 pclkd1 = clk_get_rate(pclkd1_clk);
329 clk_put(pclkd1_clk);
330
331 printk(KERN_INFO "S5P6442: HCLKD0=%ld, HCLKD1=%ld, PCLKD0=%ld, PCLKD1=%ld\n",
332 hclkd0, hclkd1, pclkd0, pclkd1);
333
334 /* For backward compatibility */
335 clk_f.rate = arm;
336 clk_h.rate = hclkd1;
337 clk_p.rate = pclkd1;
338
339 clk_pclkd0.rate = pclkd0;
340 clk_pclkd1.rate = pclkd1;
341}
342
343static struct clk init_clocks_off[] = {
344 {
345 .name = "pdma",
346 .id = -1,
347 .parent = &clk_pclkd1,
348 .enable = s5p6442_clk_ip0_ctrl,
349 .ctrlbit = (1 << 3),
350 },
351};
352
353static struct clk init_clocks[] = {
354 {
355 .name = "systimer",
356 .id = -1,
357 .parent = &clk_pclkd1,
358 .enable = s5p6442_clk_ip3_ctrl,
359 .ctrlbit = (1<<16),
360 }, {
361 .name = "uart",
362 .id = 0,
363 .parent = &clk_pclkd1,
364 .enable = s5p6442_clk_ip3_ctrl,
365 .ctrlbit = (1<<17),
366 }, {
367 .name = "uart",
368 .id = 1,
369 .parent = &clk_pclkd1,
370 .enable = s5p6442_clk_ip3_ctrl,
371 .ctrlbit = (1<<18),
372 }, {
373 .name = "uart",
374 .id = 2,
375 .parent = &clk_pclkd1,
376 .enable = s5p6442_clk_ip3_ctrl,
377 .ctrlbit = (1<<19),
378 }, {
379 .name = "watchdog",
380 .id = -1,
381 .parent = &clk_pclkd1,
382 .enable = s5p6442_clk_ip3_ctrl,
383 .ctrlbit = (1 << 22),
384 }, {
385 .name = "timers",
386 .id = -1,
387 .parent = &clk_pclkd1,
388 .enable = s5p6442_clk_ip3_ctrl,
389 .ctrlbit = (1<<23),
390 },
391};
392
393static struct clk *clks[] __initdata = {
394 &clk_ext,
395 &clk_epll,
396 &clk_mout_apll.clk,
397 &clk_mout_mpll.clk,
398 &clk_mout_epll.clk,
399 &clk_mout_d0.clk,
400 &clk_mout_d0sync.clk,
401 &clk_mout_d1.clk,
402 &clk_mout_d1sync.clk,
403 &clk_hclkd0,
404 &clk_pclkd0,
405 &clk_hclkd1,
406 &clk_pclkd1,
407};
408
409void __init s5p6442_register_clocks(void)
410{
411 s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
412
413 s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
414 s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
415
416 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
417 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
418
419 s3c_pwmclk_init();
420}
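
For reference, a consumer picks these clocks back up through the standard clk API, just as s5p6442_setup_clocks() above does for "pclkd0"/"pclkd1". A minimal sketch (needs <linux/clk.h> and <linux/err.h>; the choice of the "watchdog" clock and the helper name are illustrative only):

	static unsigned long example_wdt_clock_rate(void)
	{
		struct clk *wdt_clk;
		unsigned long rate;

		wdt_clk = clk_get(NULL, "watchdog");	/* name from init_clocks[] */
		if (IS_ERR(wdt_clk))
			return 0;

		clk_enable(wdt_clk);		/* via its s5p6442_clk_ip3_ctrl enable hook, bit 22 */
		rate = clk_get_rate(wdt_clk);	/* rate inherited from parent clk_pclkd1 */
		clk_disable(wdt_clk);
		clk_put(wdt_clk);

		return rate;
	}
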
diff --git a/arch/arm/mach-s5p6442/cpu.c b/arch/arm/mach-s5p6442/cpu.c
deleted file mode 100644
index 842af86bda6d..000000000000
--- a/arch/arm/mach-s5p6442/cpu.c
+++ /dev/null
@@ -1,143 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/cpu.c
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9*/
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/interrupt.h>
14#include <linux/list.h>
15#include <linux/timer.h>
16#include <linux/init.h>
17#include <linux/clk.h>
18#include <linux/io.h>
19#include <linux/sysdev.h>
20#include <linux/serial_core.h>
21#include <linux/platform_device.h>
22#include <linux/sched.h>
23
24#include <asm/mach/arch.h>
25#include <asm/mach/map.h>
26#include <asm/mach/irq.h>
27
28#include <asm/proc-fns.h>
29
30#include <mach/hardware.h>
31#include <mach/map.h>
32#include <asm/irq.h>
33
34#include <plat/regs-serial.h>
35#include <mach/regs-clock.h>
36
37#include <plat/cpu.h>
38#include <plat/devs.h>
39#include <plat/clock.h>
40#include <plat/s5p6442.h>
41
42/* Initial IO mappings */
43
44static struct map_desc s5p6442_iodesc[] __initdata = {
45 {
46 .virtual = (unsigned long)S5P_VA_SYSTIMER,
47 .pfn = __phys_to_pfn(S5P6442_PA_SYSTIMER),
48 .length = SZ_16K,
49 .type = MT_DEVICE,
50 }, {
51 .virtual = (unsigned long)S5P_VA_GPIO,
52 .pfn = __phys_to_pfn(S5P6442_PA_GPIO),
53 .length = SZ_4K,
54 .type = MT_DEVICE,
55 }, {
56 .virtual = (unsigned long)VA_VIC0,
57 .pfn = __phys_to_pfn(S5P6442_PA_VIC0),
58 .length = SZ_16K,
59 .type = MT_DEVICE,
60 }, {
61 .virtual = (unsigned long)VA_VIC1,
62 .pfn = __phys_to_pfn(S5P6442_PA_VIC1),
63 .length = SZ_16K,
64 .type = MT_DEVICE,
65 }, {
66 .virtual = (unsigned long)VA_VIC2,
67 .pfn = __phys_to_pfn(S5P6442_PA_VIC2),
68 .length = SZ_16K,
69 .type = MT_DEVICE,
70 }, {
71 .virtual = (unsigned long)S3C_VA_UART,
72 .pfn = __phys_to_pfn(S3C_PA_UART),
73 .length = SZ_512K,
74 .type = MT_DEVICE,
75 }
76};
77
78static void s5p6442_idle(void)
79{
80 if (!need_resched())
81 cpu_do_idle();
82
83 local_irq_enable();
84}
85
86/*
87 * s5p6442_map_io
88 *
89 * register the standard cpu IO areas
90 */
91
92void __init s5p6442_map_io(void)
93{
94 iotable_init(s5p6442_iodesc, ARRAY_SIZE(s5p6442_iodesc));
95}
96
97void __init s5p6442_init_clocks(int xtal)
98{
99 printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
100
101 s3c24xx_register_baseclocks(xtal);
102 s5p_register_clocks(xtal);
103 s5p6442_register_clocks();
104 s5p6442_setup_clocks();
105}
106
107void __init s5p6442_init_irq(void)
108{
109 /* S5P6442 supports 3 VIC */
110 u32 vic[3];
111
112	/* VIC0, VIC1, and VIC2: some interrupts are reserved */
113	vic[0] = 0x7fefffff;
114	vic[1] = 0x7f389c81;
115	vic[2] = 0x1bbbcfff;
116
117 s5p_init_irq(vic, ARRAY_SIZE(vic));
118}
119
120struct sysdev_class s5p6442_sysclass = {
121 .name = "s5p6442-core",
122};
123
124static struct sys_device s5p6442_sysdev = {
125 .cls = &s5p6442_sysclass,
126};
127
128static int __init s5p6442_core_init(void)
129{
130 return sysdev_class_register(&s5p6442_sysclass);
131}
132
133core_initcall(s5p6442_core_init);
134
135int __init s5p6442_init(void)
136{
137 printk(KERN_INFO "S5P6442: Initializing architecture\n");
138
139 /* set idle function */
140 pm_idle = s5p6442_idle;
141
142 return sysdev_register(&s5p6442_sysdev);
143}
diff --git a/arch/arm/mach-s5p6442/dev-audio.c b/arch/arm/mach-s5p6442/dev-audio.c
deleted file mode 100644
index 8719dc41fe32..000000000000
--- a/arch/arm/mach-s5p6442/dev-audio.c
+++ /dev/null
@@ -1,217 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/dev-audio.c
2 *
3 * Copyright (c) 2010 Samsung Electronics Co. Ltd
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h>
13#include <linux/gpio.h>
14
15#include <plat/gpio-cfg.h>
16#include <plat/audio.h>
17
18#include <mach/map.h>
19#include <mach/dma.h>
20#include <mach/irqs.h>
21
22static int s5p6442_cfg_i2s(struct platform_device *pdev)
23{
24 unsigned int base;
25
26 /* configure GPIO for i2s port */
27 switch (pdev->id) {
28 case 1:
29 base = S5P6442_GPC1(0);
30 break;
31
32 case 0:
33 base = S5P6442_GPC0(0);
34 break;
35
36 default:
37 printk(KERN_ERR "Invalid Device %d\n", pdev->id);
38 return -EINVAL;
39 }
40
41 s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(2));
42 return 0;
43}
44
45static const char *rclksrc_v35[] = {
46 [0] = "busclk",
47 [1] = "i2sclk",
48};
49
50static struct s3c_audio_pdata i2sv35_pdata = {
51 .cfg_gpio = s5p6442_cfg_i2s,
52 .type = {
53 .i2s = {
54 .quirks = QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR,
55 .src_clk = rclksrc_v35,
56 },
57 },
58};
59
60static struct resource s5p6442_iis0_resource[] = {
61 [0] = {
62 .start = S5P6442_PA_I2S0,
63 .end = S5P6442_PA_I2S0 + 0x100 - 1,
64 .flags = IORESOURCE_MEM,
65 },
66 [1] = {
67 .start = DMACH_I2S0_TX,
68 .end = DMACH_I2S0_TX,
69 .flags = IORESOURCE_DMA,
70 },
71 [2] = {
72 .start = DMACH_I2S0_RX,
73 .end = DMACH_I2S0_RX,
74 .flags = IORESOURCE_DMA,
75 },
76 [3] = {
77 .start = DMACH_I2S0S_TX,
78 .end = DMACH_I2S0S_TX,
79 .flags = IORESOURCE_DMA,
80 },
81};
82
83struct platform_device s5p6442_device_iis0 = {
84 .name = "samsung-i2s",
85 .id = 0,
86 .num_resources = ARRAY_SIZE(s5p6442_iis0_resource),
87 .resource = s5p6442_iis0_resource,
88 .dev = {
89 .platform_data = &i2sv35_pdata,
90 },
91};
92
93static const char *rclksrc_v3[] = {
94 [0] = "iis",
95 [1] = "sclk_audio",
96};
97
98static struct s3c_audio_pdata i2sv3_pdata = {
99 .cfg_gpio = s5p6442_cfg_i2s,
100 .type = {
101 .i2s = {
102 .src_clk = rclksrc_v3,
103 },
104 },
105};
106
107static struct resource s5p6442_iis1_resource[] = {
108 [0] = {
109 .start = S5P6442_PA_I2S1,
110 .end = S5P6442_PA_I2S1 + 0x100 - 1,
111 .flags = IORESOURCE_MEM,
112 },
113 [1] = {
114 .start = DMACH_I2S1_TX,
115 .end = DMACH_I2S1_TX,
116 .flags = IORESOURCE_DMA,
117 },
118 [2] = {
119 .start = DMACH_I2S1_RX,
120 .end = DMACH_I2S1_RX,
121 .flags = IORESOURCE_DMA,
122 },
123};
124
125struct platform_device s5p6442_device_iis1 = {
126 .name = "samsung-i2s",
127 .id = 1,
128 .num_resources = ARRAY_SIZE(s5p6442_iis1_resource),
129 .resource = s5p6442_iis1_resource,
130 .dev = {
131 .platform_data = &i2sv3_pdata,
132 },
133};
134
135/* PCM Controller platform_devices */
136
137static int s5p6442_pcm_cfg_gpio(struct platform_device *pdev)
138{
139 unsigned int base;
140
141 switch (pdev->id) {
142 case 0:
143 base = S5P6442_GPC0(0);
144 break;
145
146 case 1:
147 base = S5P6442_GPC1(0);
148 break;
149
150 default:
151		printk(KERN_ERR "Invalid PCM Controller number!\n");
152 return -EINVAL;
153 }
154
155 s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(3));
156 return 0;
157}
158
159static struct s3c_audio_pdata s3c_pcm_pdata = {
160 .cfg_gpio = s5p6442_pcm_cfg_gpio,
161};
162
163static struct resource s5p6442_pcm0_resource[] = {
164 [0] = {
165 .start = S5P6442_PA_PCM0,
166 .end = S5P6442_PA_PCM0 + 0x100 - 1,
167 .flags = IORESOURCE_MEM,
168 },
169 [1] = {
170 .start = DMACH_PCM0_TX,
171 .end = DMACH_PCM0_TX,
172 .flags = IORESOURCE_DMA,
173 },
174 [2] = {
175 .start = DMACH_PCM0_RX,
176 .end = DMACH_PCM0_RX,
177 .flags = IORESOURCE_DMA,
178 },
179};
180
181struct platform_device s5p6442_device_pcm0 = {
182 .name = "samsung-pcm",
183 .id = 0,
184 .num_resources = ARRAY_SIZE(s5p6442_pcm0_resource),
185 .resource = s5p6442_pcm0_resource,
186 .dev = {
187 .platform_data = &s3c_pcm_pdata,
188 },
189};
190
191static struct resource s5p6442_pcm1_resource[] = {
192 [0] = {
193 .start = S5P6442_PA_PCM1,
194 .end = S5P6442_PA_PCM1 + 0x100 - 1,
195 .flags = IORESOURCE_MEM,
196 },
197 [1] = {
198 .start = DMACH_PCM1_TX,
199 .end = DMACH_PCM1_TX,
200 .flags = IORESOURCE_DMA,
201 },
202 [2] = {
203 .start = DMACH_PCM1_RX,
204 .end = DMACH_PCM1_RX,
205 .flags = IORESOURCE_DMA,
206 },
207};
208
209struct platform_device s5p6442_device_pcm1 = {
210 .name = "samsung-pcm",
211 .id = 1,
212 .num_resources = ARRAY_SIZE(s5p6442_pcm1_resource),
213 .resource = s5p6442_pcm1_resource,
214 .dev = {
215 .platform_data = &s3c_pcm_pdata,
216 },
217};
diff --git a/arch/arm/mach-s5p6442/dev-spi.c b/arch/arm/mach-s5p6442/dev-spi.c
deleted file mode 100644
index cce8c2470709..000000000000
--- a/arch/arm/mach-s5p6442/dev-spi.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/dev-spi.c
2 *
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h>
13#include <linux/gpio.h>
14
15#include <mach/dma.h>
16#include <mach/map.h>
17#include <mach/irqs.h>
18#include <mach/spi-clocks.h>
19
20#include <plat/s3c64xx-spi.h>
21#include <plat/gpio-cfg.h>
22
23static char *spi_src_clks[] = {
24 [S5P6442_SPI_SRCCLK_PCLK] = "pclk",
25 [S5P6442_SPI_SRCCLK_SCLK] = "spi_epll",
26};
27
28/* SPI Controller platform_devices */
29
30/* Since we emulate multi-cs capability, we do not touch the CS.
31 * The emulated CS is toggled by a board-specific mechanism, as it can
32 * be either some immediate GPIO or some signal out of some other
33 * chip in between ... or yet another way.
34 * We simply do not assume anything about CS.
35 */
36static int s5p6442_spi_cfg_gpio(struct platform_device *pdev)
37{
38 switch (pdev->id) {
39 case 0:
40 s3c_gpio_cfgpin(S5P6442_GPB(0), S3C_GPIO_SFN(2));
41 s3c_gpio_setpull(S5P6442_GPB(0), S3C_GPIO_PULL_UP);
42 s3c_gpio_cfgall_range(S5P6442_GPB(2), 2,
43 S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
44 break;
45
46 default:
47		dev_err(&pdev->dev, "Invalid SPI Controller number!\n");
48 return -EINVAL;
49 }
50
51 return 0;
52}
53
54static struct resource s5p6442_spi0_resource[] = {
55 [0] = {
56 .start = S5P6442_PA_SPI,
57 .end = S5P6442_PA_SPI + 0x100 - 1,
58 .flags = IORESOURCE_MEM,
59 },
60 [1] = {
61 .start = DMACH_SPI0_TX,
62 .end = DMACH_SPI0_TX,
63 .flags = IORESOURCE_DMA,
64 },
65 [2] = {
66 .start = DMACH_SPI0_RX,
67 .end = DMACH_SPI0_RX,
68 .flags = IORESOURCE_DMA,
69 },
70 [3] = {
71 .start = IRQ_SPI0,
72 .end = IRQ_SPI0,
73 .flags = IORESOURCE_IRQ,
74 },
75};
76
77static struct s3c64xx_spi_info s5p6442_spi0_pdata = {
78 .cfg_gpio = s5p6442_spi_cfg_gpio,
79 .fifo_lvl_mask = 0x1ff,
80 .rx_lvl_offset = 15,
81};
82
83static u64 spi_dmamask = DMA_BIT_MASK(32);
84
85struct platform_device s5p6442_device_spi = {
86 .name = "s3c64xx-spi",
87 .id = 0,
88 .num_resources = ARRAY_SIZE(s5p6442_spi0_resource),
89 .resource = s5p6442_spi0_resource,
90 .dev = {
91 .dma_mask = &spi_dmamask,
92 .coherent_dma_mask = DMA_BIT_MASK(32),
93 .platform_data = &s5p6442_spi0_pdata,
94 },
95};
96
97void __init s5p6442_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
98{
99 struct s3c64xx_spi_info *pd;
100
101 /* Reject invalid configuration */
102 if (!num_cs || src_clk_nr < 0
103 || src_clk_nr > S5P6442_SPI_SRCCLK_SCLK) {
104 printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
105 return;
106 }
107
108 switch (cntrlr) {
109 case 0:
110 pd = &s5p6442_spi0_pdata;
111 break;
112 default:
113 printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
114 __func__, cntrlr);
115 return;
116 }
117
118 pd->num_cs = num_cs;
119 pd->src_clk_nr = src_clk_nr;
120 pd->src_clk_name = spi_src_clks[src_clk_nr];
121}
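
A board file would normally call s5p6442_spi_set_info() once during machine init and then register the controller; a minimal sketch (the single emulated chip select and the helper name are arbitrary example values):

	static void __init example_board_spi_init(void)
	{
		/* PCLK as source clock, one emulated chip select */
		s5p6442_spi_set_info(0, S5P6442_SPI_SRCCLK_PCLK, 1);
		platform_device_register(&s5p6442_device_spi);
	}
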
diff --git a/arch/arm/mach-s5p6442/dma.c b/arch/arm/mach-s5p6442/dma.c
deleted file mode 100644
index 7dfb13654f8a..000000000000
--- a/arch/arm/mach-s5p6442/dma.c
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
3 * Jaswinder Singh <jassi.brar@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#include <linux/platform_device.h>
21#include <linux/dma-mapping.h>
22
23#include <plat/devs.h>
24#include <plat/irqs.h>
25
26#include <mach/map.h>
27#include <mach/irqs.h>
28
29#include <plat/s3c-pl330-pdata.h>
30
31static u64 dma_dmamask = DMA_BIT_MASK(32);
32
33static struct resource s5p6442_pdma_resource[] = {
34 [0] = {
35 .start = S5P6442_PA_PDMA,
36		.end = S5P6442_PA_PDMA + SZ_4K - 1,
37 .flags = IORESOURCE_MEM,
38 },
39 [1] = {
40 .start = IRQ_PDMA,
41 .end = IRQ_PDMA,
42 .flags = IORESOURCE_IRQ,
43 },
44};
45
46static struct s3c_pl330_platdata s5p6442_pdma_pdata = {
47 .peri = {
48 [0] = DMACH_UART0_RX,
49 [1] = DMACH_UART0_TX,
50 [2] = DMACH_UART1_RX,
51 [3] = DMACH_UART1_TX,
52 [4] = DMACH_UART2_RX,
53 [5] = DMACH_UART2_TX,
54 [6] = DMACH_MAX,
55 [7] = DMACH_MAX,
56 [8] = DMACH_MAX,
57 [9] = DMACH_I2S0_RX,
58 [10] = DMACH_I2S0_TX,
59 [11] = DMACH_I2S0S_TX,
60 [12] = DMACH_I2S1_RX,
61 [13] = DMACH_I2S1_TX,
62 [14] = DMACH_MAX,
63 [15] = DMACH_MAX,
64 [16] = DMACH_SPI0_RX,
65 [17] = DMACH_SPI0_TX,
66 [18] = DMACH_MAX,
67 [19] = DMACH_MAX,
68 [20] = DMACH_PCM0_RX,
69 [21] = DMACH_PCM0_TX,
70 [22] = DMACH_PCM1_RX,
71 [23] = DMACH_PCM1_TX,
72 [24] = DMACH_MAX,
73 [25] = DMACH_MAX,
74 [26] = DMACH_MAX,
75 [27] = DMACH_MSM_REQ0,
76 [28] = DMACH_MSM_REQ1,
77 [29] = DMACH_MSM_REQ2,
78 [30] = DMACH_MSM_REQ3,
79 [31] = DMACH_MAX,
80 },
81};
82
83static struct platform_device s5p6442_device_pdma = {
84 .name = "s3c-pl330",
85 .id = -1,
86 .num_resources = ARRAY_SIZE(s5p6442_pdma_resource),
87 .resource = s5p6442_pdma_resource,
88 .dev = {
89 .dma_mask = &dma_dmamask,
90 .coherent_dma_mask = DMA_BIT_MASK(32),
91 .platform_data = &s5p6442_pdma_pdata,
92 },
93};
94
95static struct platform_device *s5p6442_dmacs[] __initdata = {
96 &s5p6442_device_pdma,
97};
98
99static int __init s5p6442_dma_init(void)
100{
101 platform_add_devices(s5p6442_dmacs, ARRAY_SIZE(s5p6442_dmacs));
102
103 return 0;
104}
105arch_initcall(s5p6442_dma_init);
diff --git a/arch/arm/mach-s5p6442/include/mach/debug-macro.S b/arch/arm/mach-s5p6442/include/mach/debug-macro.S
deleted file mode 100644
index e2213205d780..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/debug-macro.S
+++ /dev/null
@@ -1,35 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/debug-macro.S
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Based on arch/arm/mach-s3c6400/include/mach/debug-macro.S
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13/* pull in the relevant register and map files. */
14
15#include <mach/map.h>
16#include <plat/regs-serial.h>
17
18 .macro addruart, rp, rv
19 ldr \rp, = S3C_PA_UART
20 ldr \rv, = S3C_VA_UART
21#if CONFIG_DEBUG_S3C_UART != 0
22 add \rp, \rp, #(0x400 * CONFIG_DEBUG_S3C_UART)
23 add \rv, \rv, #(0x400 * CONFIG_DEBUG_S3C_UART)
24#endif
25 .endm
26
27#define fifo_full fifo_full_s5pv210
28#define fifo_level fifo_level_s5pv210
29
30/* include the rest of the code which will do the work, we're only
31 * compiling for a single cpu processor type so the default of s3c2440
32 * will be fine with us.
33 */
34
35#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-s5p6442/include/mach/entry-macro.S b/arch/arm/mach-s5p6442/include/mach/entry-macro.S
deleted file mode 100644
index 6d574edbf1ae..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/entry-macro.S
+++ /dev/null
@@ -1,48 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/entry-macro.S
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Low-level IRQ helper macros for the Samsung S5P6442
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <asm/hardware/vic.h>
14#include <mach/map.h>
15#include <plat/irqs.h>
16
17 .macro disable_fiq
18 .endm
19
20 .macro get_irqnr_preamble, base, tmp
21 ldr \base, =VA_VIC0
22 .endm
23
24 .macro arch_ret_to_user, tmp1, tmp2
25 .endm
26
27 .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
28
29 @ check the vic0
30 mov \irqnr, # S5P_IRQ_OFFSET + 31
31 ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
32 teq \irqstat, #0
33
34 @ otherwise try vic1
35 addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
36 addeq \irqnr, \irqnr, #32
37 ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
38 teqeq \irqstat, #0
39
40 @ otherwise try vic2
41 addeq \tmp, \base, #(VA_VIC2 - VA_VIC0)
42 addeq \irqnr, \irqnr, #32
43 ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
44 teqeq \irqstat, #0
45
46 clzne \irqstat, \irqstat
47 subne \irqnr, \irqnr, \irqstat
48 .endm
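
The clz/sub arithmetic at the end of get_irqnr_and_base is easier to follow in C; a rough equivalent for a single VIC, assuming S5P_IRQ_VIC0(x) expands to S5P_IRQ_OFFSET + x (as the macro's use of S5P_IRQ_OFFSET suggests):

	/* Start from (offset + 31) for the VIC and subtract the count of
	 * leading zeros of the status word to land on the highest pending
	 * bit.  Example: bit 27 (IRQ_WDT) pending on VIC0 gives clz == 4,
	 * so irqnr == S5P_IRQ_OFFSET + 27.
	 */
	static inline unsigned int example_vic_irqnr(unsigned int vic, u32 status)
	{
		if (!status)
			return 0;	/* nothing pending on this VIC */

		return S5P_IRQ_OFFSET + 31 + 32 * vic - __builtin_clz(status);
	}
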
diff --git a/arch/arm/mach-s5p6442/include/mach/gpio.h b/arch/arm/mach-s5p6442/include/mach/gpio.h
deleted file mode 100644
index b8715df2fdab..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/gpio.h
+++ /dev/null
@@ -1,123 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/gpio.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5P6442 - GPIO lib support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_GPIO_H
14#define __ASM_ARCH_GPIO_H __FILE__
15
16#define gpio_get_value __gpio_get_value
17#define gpio_set_value __gpio_set_value
18#define gpio_cansleep __gpio_cansleep
19#define gpio_to_irq __gpio_to_irq
20
21/* GPIO bank sizes */
22#define S5P6442_GPIO_A0_NR (8)
23#define S5P6442_GPIO_A1_NR (2)
24#define S5P6442_GPIO_B_NR (4)
25#define S5P6442_GPIO_C0_NR (5)
26#define S5P6442_GPIO_C1_NR (5)
27#define S5P6442_GPIO_D0_NR (2)
28#define S5P6442_GPIO_D1_NR (6)
29#define S5P6442_GPIO_E0_NR (8)
30#define S5P6442_GPIO_E1_NR (5)
31#define S5P6442_GPIO_F0_NR (8)
32#define S5P6442_GPIO_F1_NR (8)
33#define S5P6442_GPIO_F2_NR (8)
34#define S5P6442_GPIO_F3_NR (6)
35#define S5P6442_GPIO_G0_NR (7)
36#define S5P6442_GPIO_G1_NR (7)
37#define S5P6442_GPIO_G2_NR (7)
38#define S5P6442_GPIO_H0_NR (8)
39#define S5P6442_GPIO_H1_NR (8)
40#define S5P6442_GPIO_H2_NR (8)
41#define S5P6442_GPIO_H3_NR (8)
42#define S5P6442_GPIO_J0_NR (8)
43#define S5P6442_GPIO_J1_NR (6)
44#define S5P6442_GPIO_J2_NR (8)
45#define S5P6442_GPIO_J3_NR (8)
46#define S5P6442_GPIO_J4_NR (5)
47
48/* GPIO bank numbers */
49
50/* CONFIG_S3C_GPIO_SPACE allows the user to select extra
51 * space for debugging purposes so that any accidental
52 * change from one gpio bank to another can be caught.
53*/
54
55#define S5P6442_GPIO_NEXT(__gpio) \
56 ((__gpio##_START) + (__gpio##_NR) + CONFIG_S3C_GPIO_SPACE + 1)
57
58enum s5p_gpio_number {
59 S5P6442_GPIO_A0_START = 0,
60 S5P6442_GPIO_A1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_A0),
61 S5P6442_GPIO_B_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_A1),
62 S5P6442_GPIO_C0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_B),
63 S5P6442_GPIO_C1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_C0),
64 S5P6442_GPIO_D0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_C1),
65 S5P6442_GPIO_D1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_D0),
66 S5P6442_GPIO_E0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_D1),
67 S5P6442_GPIO_E1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_E0),
68 S5P6442_GPIO_F0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_E1),
69 S5P6442_GPIO_F1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F0),
70 S5P6442_GPIO_F2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F1),
71 S5P6442_GPIO_F3_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F2),
72 S5P6442_GPIO_G0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_F3),
73 S5P6442_GPIO_G1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_G0),
74 S5P6442_GPIO_G2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_G1),
75 S5P6442_GPIO_H0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_G2),
76 S5P6442_GPIO_H1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H0),
77 S5P6442_GPIO_H2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H1),
78 S5P6442_GPIO_H3_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H2),
79 S5P6442_GPIO_J0_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_H3),
80 S5P6442_GPIO_J1_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J0),
81 S5P6442_GPIO_J2_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J1),
82 S5P6442_GPIO_J3_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J2),
83 S5P6442_GPIO_J4_START = S5P6442_GPIO_NEXT(S5P6442_GPIO_J3),
84};
85
86/* S5P6442 GPIO number definitions. */
87#define S5P6442_GPA0(_nr) (S5P6442_GPIO_A0_START + (_nr))
88#define S5P6442_GPA1(_nr) (S5P6442_GPIO_A1_START + (_nr))
89#define S5P6442_GPB(_nr) (S5P6442_GPIO_B_START + (_nr))
90#define S5P6442_GPC0(_nr) (S5P6442_GPIO_C0_START + (_nr))
91#define S5P6442_GPC1(_nr) (S5P6442_GPIO_C1_START + (_nr))
92#define S5P6442_GPD0(_nr) (S5P6442_GPIO_D0_START + (_nr))
93#define S5P6442_GPD1(_nr) (S5P6442_GPIO_D1_START + (_nr))
94#define S5P6442_GPE0(_nr) (S5P6442_GPIO_E0_START + (_nr))
95#define S5P6442_GPE1(_nr) (S5P6442_GPIO_E1_START + (_nr))
96#define S5P6442_GPF0(_nr) (S5P6442_GPIO_F0_START + (_nr))
97#define S5P6442_GPF1(_nr) (S5P6442_GPIO_F1_START + (_nr))
98#define S5P6442_GPF2(_nr) (S5P6442_GPIO_F2_START + (_nr))
99#define S5P6442_GPF3(_nr) (S5P6442_GPIO_F3_START + (_nr))
100#define S5P6442_GPG0(_nr) (S5P6442_GPIO_G0_START + (_nr))
101#define S5P6442_GPG1(_nr) (S5P6442_GPIO_G1_START + (_nr))
102#define S5P6442_GPG2(_nr) (S5P6442_GPIO_G2_START + (_nr))
103#define S5P6442_GPH0(_nr) (S5P6442_GPIO_H0_START + (_nr))
104#define S5P6442_GPH1(_nr) (S5P6442_GPIO_H1_START + (_nr))
105#define S5P6442_GPH2(_nr) (S5P6442_GPIO_H2_START + (_nr))
106#define S5P6442_GPH3(_nr) (S5P6442_GPIO_H3_START + (_nr))
107#define S5P6442_GPJ0(_nr) (S5P6442_GPIO_J0_START + (_nr))
108#define S5P6442_GPJ1(_nr) (S5P6442_GPIO_J1_START + (_nr))
109#define S5P6442_GPJ2(_nr) (S5P6442_GPIO_J2_START + (_nr))
110#define S5P6442_GPJ3(_nr) (S5P6442_GPIO_J3_START + (_nr))
111#define S5P6442_GPJ4(_nr) (S5P6442_GPIO_J4_START + (_nr))
112
113/* the end of the S5P6442 specific gpios */
114#define S5P6442_GPIO_END (S5P6442_GPJ4(S5P6442_GPIO_J4_NR) + 1)
115#define S3C_GPIO_END S5P6442_GPIO_END
116
117/* define the number of gpios we need: one past the end of the GPJ4() range */
118#define ARCH_NR_GPIOS (S5P6442_GPJ4(S5P6442_GPIO_J4_NR) + \
119 CONFIG_SAMSUNG_GPIO_EXTRA + 1)
120
121#include <asm-generic/gpio.h>
122
123#endif /* __ASM_ARCH_GPIO_H */
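
The S5P6442_GPIO_NEXT() chaining is easiest to see with concrete numbers; assuming CONFIG_S3C_GPIO_SPACE is 0, the first few banks work out as follows:

	/* S5P6442_GPIO_NEXT(bank) = bank_START + bank_NR + SPACE + 1, so:
	 *
	 *   S5P6442_GPIO_A0_START = 0                    (8 pins: gpio 0..7)
	 *   S5P6442_GPIO_A1_START = 0 + 8 + 0 + 1 =  9   (2 pins: gpio 9..10)
	 *   S5P6442_GPIO_B_START  = 9 + 2 + 0 + 1 = 12   (4 pins: gpio 12..15)
	 *
	 * i.e. S5P6442_GPB(0) == 12, with one spare number between banks to
	 * catch accidental indexing off the end of a bank.
	 */
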
diff --git a/arch/arm/mach-s5p6442/include/mach/hardware.h b/arch/arm/mach-s5p6442/include/mach/hardware.h
deleted file mode 100644
index 8cd7b67b49d4..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/hardware.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/hardware.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5P6442 - Hardware support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_HARDWARE_H
14#define __ASM_ARCH_HARDWARE_H __FILE__
15
16/* currently nothing here, placeholder */
17
18#endif /* __ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/io.h b/arch/arm/mach-s5p6442/include/mach/io.h
deleted file mode 100644
index 5d2195ad0b67..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/io.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/* arch/arm/mach-s5p6442/include/mach/io.h
2 *
3 * Copyright 2008-2010 Ben Dooks <ben-linux@fluff.org>
4 *
5 * Default IO routines for S5P6442
6 */
7
8#ifndef __ASM_ARM_ARCH_IO_H
9#define __ASM_ARM_ARCH_IO_H
10
11/* No current ISA/PCI bus support. */
12#define __io(a) __typesafe_io(a)
13#define __mem_pci(a) (a)
14
15#define IO_SPACE_LIMIT (0xFFFFFFFF)
16
17#endif
diff --git a/arch/arm/mach-s5p6442/include/mach/irqs.h b/arch/arm/mach-s5p6442/include/mach/irqs.h
deleted file mode 100644
index 3fbc6c3ad2da..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/irqs.h
+++ /dev/null
@@ -1,87 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/irqs.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5P6442 - IRQ definitions
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_IRQS_H
14#define __ASM_ARCH_IRQS_H __FILE__
15
16#include <plat/irqs.h>
17
18/* VIC0 */
19#define IRQ_EINT16_31 S5P_IRQ_VIC0(16)
20#define IRQ_BATF S5P_IRQ_VIC0(17)
21#define IRQ_MDMA S5P_IRQ_VIC0(18)
22#define IRQ_PDMA S5P_IRQ_VIC0(19)
23#define IRQ_TIMER0_VIC S5P_IRQ_VIC0(21)
24#define IRQ_TIMER1_VIC S5P_IRQ_VIC0(22)
25#define IRQ_TIMER2_VIC S5P_IRQ_VIC0(23)
26#define IRQ_TIMER3_VIC S5P_IRQ_VIC0(24)
27#define IRQ_TIMER4_VIC S5P_IRQ_VIC0(25)
28#define IRQ_SYSTIMER S5P_IRQ_VIC0(26)
29#define IRQ_WDT S5P_IRQ_VIC0(27)
30#define IRQ_RTC_ALARM S5P_IRQ_VIC0(28)
31#define IRQ_RTC_TIC S5P_IRQ_VIC0(29)
32#define IRQ_GPIOINT S5P_IRQ_VIC0(30)
33
34/* VIC1 */
35#define IRQ_PMU S5P_IRQ_VIC1(0)
36#define IRQ_ONENAND S5P_IRQ_VIC1(7)
37#define IRQ_UART0 S5P_IRQ_VIC1(10)
38#define IRQ_UART1 S5P_IRQ_VIC1(11)
39#define IRQ_UART2 S5P_IRQ_VIC1(12)
40#define IRQ_SPI0 S5P_IRQ_VIC1(15)
41#define IRQ_IIC S5P_IRQ_VIC1(19)
42#define IRQ_IIC1 S5P_IRQ_VIC1(20)
43#define IRQ_IIC2 S5P_IRQ_VIC1(21)
44#define IRQ_OTG S5P_IRQ_VIC1(24)
45#define IRQ_MSM S5P_IRQ_VIC1(25)
46#define IRQ_HSMMC0 S5P_IRQ_VIC1(26)
47#define IRQ_HSMMC1 S5P_IRQ_VIC1(27)
48#define IRQ_HSMMC2 S5P_IRQ_VIC1(28)
49#define IRQ_COMMRX S5P_IRQ_VIC1(29)
50#define IRQ_COMMTX S5P_IRQ_VIC1(30)
51
52/* VIC2 */
53#define IRQ_LCD0 S5P_IRQ_VIC2(0)
54#define IRQ_LCD1 S5P_IRQ_VIC2(1)
55#define IRQ_LCD2 S5P_IRQ_VIC2(2)
56#define IRQ_LCD3 S5P_IRQ_VIC2(3)
57#define IRQ_ROTATOR S5P_IRQ_VIC2(4)
58#define IRQ_FIMC0 S5P_IRQ_VIC2(5)
59#define IRQ_FIMC1 S5P_IRQ_VIC2(6)
60#define IRQ_FIMC2 S5P_IRQ_VIC2(7)
61#define IRQ_JPEG S5P_IRQ_VIC2(8)
62#define IRQ_3D S5P_IRQ_VIC2(10)
63#define IRQ_Mixer S5P_IRQ_VIC2(11)
64#define IRQ_MFC S5P_IRQ_VIC2(14)
65#define IRQ_TVENC S5P_IRQ_VIC2(15)
66#define IRQ_I2S0 S5P_IRQ_VIC2(16)
67#define IRQ_I2S1 S5P_IRQ_VIC2(17)
68#define IRQ_RP S5P_IRQ_VIC2(19)
69#define IRQ_PCM0 S5P_IRQ_VIC2(20)
70#define IRQ_PCM1 S5P_IRQ_VIC2(21)
71#define IRQ_ADC S5P_IRQ_VIC2(23)
72#define IRQ_PENDN S5P_IRQ_VIC2(24)
73#define IRQ_KEYPAD S5P_IRQ_VIC2(25)
74#define IRQ_SSS_INT S5P_IRQ_VIC2(27)
75#define IRQ_SSS_HASH S5P_IRQ_VIC2(28)
76#define IRQ_VIC_END S5P_IRQ_VIC2(31)
77
78#define S5P_IRQ_EINT_BASE (IRQ_VIC_END + 1)
79
80#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
81#define S5P_EINT_BASE2 (S5P_IRQ_EINT_BASE)
82
83/* Set the default NR_IRQS */
84
85#define NR_IRQS (IRQ_EINT(31) + 1)
86
87#endif /* __ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h
deleted file mode 100644
index 058dab4482a1..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/map.h
+++ /dev/null
@@ -1,76 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/map.h
2 *
3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5P6442 - Memory map definitions
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_MAP_H
14#define __ASM_ARCH_MAP_H __FILE__
15
16#include <plat/map-base.h>
17#include <plat/map-s5p.h>
18
19#define S5P6442_PA_SDRAM 0x20000000
20
21#define S5P6442_PA_I2S0 0xC0B00000
22#define S5P6442_PA_I2S1 0xF2200000
23
24#define S5P6442_PA_CHIPID 0xE0000000
25
26#define S5P6442_PA_SYSCON 0xE0100000
27
28#define S5P6442_PA_GPIO 0xE0200000
29
30#define S5P6442_PA_VIC0 0xE4000000
31#define S5P6442_PA_VIC1 0xE4100000
32#define S5P6442_PA_VIC2 0xE4200000
33
34#define S5P6442_PA_SROMC 0xE7000000
35
36#define S5P6442_PA_MDMA 0xE8000000
37#define S5P6442_PA_PDMA 0xE9000000
38
39#define S5P6442_PA_TIMER 0xEA000000
40
41#define S5P6442_PA_SYSTIMER 0xEA100000
42
43#define S5P6442_PA_WATCHDOG 0xEA200000
44
45#define S5P6442_PA_UART 0xEC000000
46
47#define S5P6442_PA_IIC0 0xEC100000
48
49#define S5P6442_PA_SPI 0xEC300000
50
51#define S5P6442_PA_PCM0 0xF2400000
52#define S5P6442_PA_PCM1 0xF2500000
53
54/* Compatibility Defines */
55
56#define S3C_PA_IIC S5P6442_PA_IIC0
57#define S3C_PA_WDT S5P6442_PA_WATCHDOG
58
59#define S5P_PA_CHIPID S5P6442_PA_CHIPID
60#define S5P_PA_SDRAM S5P6442_PA_SDRAM
61#define S5P_PA_SROMC S5P6442_PA_SROMC
62#define S5P_PA_SYSCON S5P6442_PA_SYSCON
63#define S5P_PA_TIMER S5P6442_PA_TIMER
64
65/* UART */
66
67#define S3C_PA_UART S5P6442_PA_UART
68
69#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
70#define S5P_PA_UART0 S5P_PA_UART(0)
71#define S5P_PA_UART1 S5P_PA_UART(1)
72#define S5P_PA_UART2 S5P_PA_UART(2)
73
74#define S5P_SZ_UART SZ_256
75
76#endif /* __ASM_ARCH_MAP_H */
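
The S5P_PA_UART(x) arithmetic written out, assuming S3C_UART_OFFSET is 0x400 (the same per-port stride debug-macro.S uses; the define itself lives in the plat/ headers, not in this file):

	/*   S5P_PA_UART0 = 0xEC000000 + 0 * 0x400 = 0xEC000000
	 *   S5P_PA_UART1 = 0xEC000000 + 1 * 0x400 = 0xEC000400
	 *   S5P_PA_UART2 = 0xEC000000 + 2 * 0x400 = 0xEC000800
	 *
	 * with S5P_SZ_UART (SZ_256) describing how much of each window is
	 * actually used for registers.
	 */
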
diff --git a/arch/arm/mach-s5p6442/include/mach/memory.h b/arch/arm/mach-s5p6442/include/mach/memory.h
deleted file mode 100644
index cfe259dded33..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/memory.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/memory.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5P6442 - Memory definitions
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_MEMORY_H
14#define __ASM_ARCH_MEMORY_H
15
16#define PLAT_PHYS_OFFSET UL(0x20000000)
17#define CONSISTENT_DMA_SIZE SZ_8M
18
19#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/pwm-clock.h b/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
deleted file mode 100644
index 2724b37def31..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Copyright 2008 Openmoko, Inc.
7 * Copyright 2008 Simtec Electronics
8 * Ben Dooks <ben@simtec.co.uk>
9 * http://armlinux.simtec.co.uk/
10 *
11 * Based on arch/arm/mach-s3c64xx/include/mach/pwm-clock.h
12 *
13 * S5P6442 - pwm clock and timer support
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
18*/
19
20#ifndef __ASM_ARCH_PWMCLK_H
21#define __ASM_ARCH_PWMCLK_H __FILE__
22
23/**
24 * pwm_cfg_src_is_tclk() - return whether the given mux config is a tclk
25 * @tcfg: The timer TCFG1 register bits shifted down to 0.
26 *
27 * Return true if the given configuration from TCFG1 is a TCLK instead
28 * of any of the TDIV clocks.
29 */
30static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
31{
32 return tcfg == S3C64XX_TCFG1_MUX_TCLK;
33}
34
35/**
36 * tcfg_to_divisor() - convert tcfg1 setting to a divisor
37 * @tcfg1: The tcfg1 setting, shifted down.
38 *
39 * Get the divisor value for the given tcfg1 setting. We assume the
40 * caller has already checked that this is not a TCLK source.
41 */
42static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
43{
44 return 1 << tcfg1;
45}
46
47/**
48 * pwm_tdiv_has_div1() - does the tdiv setting have a /1
49 *
50 * Return true if we have a /1 in the tdiv setting.
51 */
52static inline unsigned int pwm_tdiv_has_div1(void)
53{
54 return 1;
55}
56
57/**
58 * pwm_tdiv_div_bits() - calculate TCFG1 divisor value.
59 * @div: The divisor to calculate the bit information for.
60 *
61 * Turn a divisor into the necessary bit field for TCFG1.
62 */
63static inline unsigned long pwm_tdiv_div_bits(unsigned int div)
64{
65 return ilog2(div);
66}
67
68#define S3C_TCFG1_MUX_TCLK S3C64XX_TCFG1_MUX_TCLK
69
70#endif /* __ASM_ARCH_PWMCLK_H */
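
tcfg_to_divisor() and pwm_tdiv_div_bits() are inverses of each other; a quick worked check:

	/*   pwm_tdiv_div_bits(8) == ilog2(8) == 3
	 *   tcfg_to_divisor(3)   == 1 << 3  == 8
	 *
	 * pwm_cfg_src_is_tclk() matches only the single TCLK mux value, so
	 * every other TCFG1 setting is interpreted as one of these dividers.
	 */
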
diff --git a/arch/arm/mach-s5p6442/include/mach/regs-clock.h b/arch/arm/mach-s5p6442/include/mach/regs-clock.h
deleted file mode 100644
index 00828a336991..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/regs-clock.h
+++ /dev/null
@@ -1,104 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/regs-clock.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5P6442 - Clock register definitions
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_REGS_CLOCK_H
14#define __ASM_ARCH_REGS_CLOCK_H __FILE__
15
16#include <mach/map.h>
17
18#define S5P_CLKREG(x) (S3C_VA_SYS + (x))
19
20#define S5P_APLL_LOCK S5P_CLKREG(0x00)
21#define S5P_MPLL_LOCK S5P_CLKREG(0x08)
22#define S5P_EPLL_LOCK S5P_CLKREG(0x10)
23#define S5P_VPLL_LOCK S5P_CLKREG(0x20)
24
25#define S5P_APLL_CON S5P_CLKREG(0x100)
26#define S5P_MPLL_CON S5P_CLKREG(0x108)
27#define S5P_EPLL_CON S5P_CLKREG(0x110)
28#define S5P_VPLL_CON S5P_CLKREG(0x120)
29
30#define S5P_CLK_SRC0 S5P_CLKREG(0x200)
31#define S5P_CLK_SRC1 S5P_CLKREG(0x204)
32#define S5P_CLK_SRC2 S5P_CLKREG(0x208)
33#define S5P_CLK_SRC3 S5P_CLKREG(0x20C)
34#define S5P_CLK_SRC4 S5P_CLKREG(0x210)
35#define S5P_CLK_SRC5 S5P_CLKREG(0x214)
36#define S5P_CLK_SRC6 S5P_CLKREG(0x218)
37
38#define S5P_CLK_SRC_MASK0 S5P_CLKREG(0x280)
39#define S5P_CLK_SRC_MASK1 S5P_CLKREG(0x284)
40
41#define S5P_CLK_DIV0 S5P_CLKREG(0x300)
42#define S5P_CLK_DIV1 S5P_CLKREG(0x304)
43#define S5P_CLK_DIV2 S5P_CLKREG(0x308)
44#define S5P_CLK_DIV3 S5P_CLKREG(0x30C)
45#define S5P_CLK_DIV4 S5P_CLKREG(0x310)
46#define S5P_CLK_DIV5 S5P_CLKREG(0x314)
47#define S5P_CLK_DIV6 S5P_CLKREG(0x318)
48
49#define S5P_CLKGATE_IP0 S5P_CLKREG(0x460)
50#define S5P_CLKGATE_IP3 S5P_CLKREG(0x46C)
51
52/* CLK_OUT */
53#define S5P_CLK_OUT_SHIFT (12)
54#define S5P_CLK_OUT_MASK (0x1F << S5P_CLK_OUT_SHIFT)
55#define S5P_CLK_OUT S5P_CLKREG(0x500)
56
57#define S5P_CLK_DIV_STAT0 S5P_CLKREG(0x1000)
58#define S5P_CLK_DIV_STAT1 S5P_CLKREG(0x1004)
59
60#define S5P_CLK_MUX_STAT0 S5P_CLKREG(0x1100)
61#define S5P_CLK_MUX_STAT1 S5P_CLKREG(0x1104)
62
63#define S5P_MDNIE_SEL S5P_CLKREG(0x7008)
64
65/* Register Bit definition */
66#define S5P_EPLL_EN (1<<31)
67#define S5P_EPLL_MASK 0xffffffff
68#define S5P_EPLLVAL(_m, _p, _s) ((_m) << 16 | ((_p) << 8) | ((_s)))
69
70/* CLKDIV0 */
71#define S5P_CLKDIV0_APLL_SHIFT (0)
72#define S5P_CLKDIV0_APLL_MASK (0x7 << S5P_CLKDIV0_APLL_SHIFT)
73#define S5P_CLKDIV0_A2M_SHIFT (4)
74#define S5P_CLKDIV0_A2M_MASK (0x7 << S5P_CLKDIV0_A2M_SHIFT)
75#define S5P_CLKDIV0_D0CLK_SHIFT (16)
76#define S5P_CLKDIV0_D0CLK_MASK (0xF << S5P_CLKDIV0_D0CLK_SHIFT)
77#define S5P_CLKDIV0_P0CLK_SHIFT (20)
78#define S5P_CLKDIV0_P0CLK_MASK (0x7 << S5P_CLKDIV0_P0CLK_SHIFT)
79#define S5P_CLKDIV0_D1CLK_SHIFT (24)
80#define S5P_CLKDIV0_D1CLK_MASK (0xF << S5P_CLKDIV0_D1CLK_SHIFT)
81#define S5P_CLKDIV0_P1CLK_SHIFT (28)
82#define S5P_CLKDIV0_P1CLK_MASK (0x7 << S5P_CLKDIV0_P1CLK_SHIFT)
83
84/* Clock MUX status Registers */
85#define S5P_CLK_MUX_STAT0_APLL_SHIFT (0)
86#define S5P_CLK_MUX_STAT0_APLL_MASK (0x7 << S5P_CLK_MUX_STAT0_APLL_SHIFT)
87#define S5P_CLK_MUX_STAT0_MPLL_SHIFT (4)
88#define S5P_CLK_MUX_STAT0_MPLL_MASK (0x7 << S5P_CLK_MUX_STAT0_MPLL_SHIFT)
89#define S5P_CLK_MUX_STAT0_EPLL_SHIFT (8)
90#define S5P_CLK_MUX_STAT0_EPLL_MASK (0x7 << S5P_CLK_MUX_STAT0_EPLL_SHIFT)
91#define S5P_CLK_MUX_STAT0_VPLL_SHIFT (12)
92#define S5P_CLK_MUX_STAT0_VPLL_MASK (0x7 << S5P_CLK_MUX_STAT0_VPLL_SHIFT)
93#define S5P_CLK_MUX_STAT0_MUXARM_SHIFT (16)
94#define S5P_CLK_MUX_STAT0_MUXARM_MASK (0x7 << S5P_CLK_MUX_STAT0_MUXARM_SHIFT)
95#define S5P_CLK_MUX_STAT0_MUXD0_SHIFT (20)
96#define S5P_CLK_MUX_STAT0_MUXD0_MASK (0x7 << S5P_CLK_MUX_STAT0_MUXD0_SHIFT)
97#define S5P_CLK_MUX_STAT0_MUXD1_SHIFT (24)
98#define S5P_CLK_MUX_STAT0_MUXD1_MASK (0x7 << S5P_CLK_MUX_STAT0_MUXD1_SHIFT)
99#define S5P_CLK_MUX_STAT1_D1SYNC_SHIFT (24)
100#define S5P_CLK_MUX_STAT1_D1SYNC_MASK (0x7 << S5P_CLK_MUX_STAT1_D1SYNC_SHIFT)
101#define S5P_CLK_MUX_STAT1_D0SYNC_SHIFT (28)
102#define S5P_CLK_MUX_STAT1_D0SYNC_MASK (0x7 << S5P_CLK_MUX_STAT1_D0SYNC_SHIFT)
103
104#endif /* __ASM_ARCH_REGS_CLOCK_H */
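
The shift/mask pairs are meant to be combined when reading a field back; a minimal sketch in the style of the __raw_readl() calls in the clock code above (purely illustrative; whether the hardware divides by the field value or by field + 1 is outside this header):

	static inline u32 example_d0clk_div_field(void)
	{
		/* raw D0CLK divider field from CLK_DIV0 */
		return (__raw_readl(S5P_CLK_DIV0) & S5P_CLKDIV0_D0CLK_MASK)
				>> S5P_CLKDIV0_D0CLK_SHIFT;
	}
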
diff --git a/arch/arm/mach-s5p6442/include/mach/regs-irq.h b/arch/arm/mach-s5p6442/include/mach/regs-irq.h
deleted file mode 100644
index 73782b52a83b..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/regs-irq.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/regs-irq.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5P6442 - IRQ register definitions
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_REGS_IRQ_H
14#define __ASM_ARCH_REGS_IRQ_H __FILE__
15
16#include <asm/hardware/vic.h>
17#include <mach/map.h>
18
19#endif /* __ASM_ARCH_REGS_IRQ_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/spi-clocks.h b/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
deleted file mode 100644
index 7fd88205a97c..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
2 *
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __S5P6442_PLAT_SPI_CLKS_H
12#define __S5P6442_PLAT_SPI_CLKS_H __FILE__
13
14#define S5P6442_SPI_SRCCLK_PCLK 0
15#define S5P6442_SPI_SRCCLK_SCLK 1
16
17#endif /* __S5P6442_PLAT_SPI_CLKS_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/system.h b/arch/arm/mach-s5p6442/include/mach/system.h
deleted file mode 100644
index c30c1cc1b97e..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/system.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/system.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5P6442 - system support header
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_SYSTEM_H
14#define __ASM_ARCH_SYSTEM_H __FILE__
15
16#include <plat/system-reset.h>
17
18static void arch_idle(void)
19{
20 /* nothing here yet */
21}
22
23#endif /* __ASM_ARCH_SYSTEM_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/tick.h b/arch/arm/mach-s5p6442/include/mach/tick.h
deleted file mode 100644
index e1d4cabf8297..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/tick.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/tick.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Based on arch/arm/mach-s3c6400/include/mach/tick.h
7 *
8 * S5P6442 - Timer tick support definitions
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#ifndef __ASM_ARCH_TICK_H
16#define __ASM_ARCH_TICK_H __FILE__
17
18static inline u32 s3c24xx_ostimer_pending(void)
19{
20 u32 pend = __raw_readl(VA_VIC0 + VIC_RAW_STATUS);
21 return pend & (1 << (IRQ_TIMER4_VIC - S5P_IRQ_VIC0(0)));
22}
23
24#define TICK_MAX (0xffffffff)
25
26#endif /* __ASM_ARCH_TICK_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/timex.h b/arch/arm/mach-s5p6442/include/mach/timex.h
deleted file mode 100644
index ff8f2fcadeb7..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/timex.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/* arch/arm/mach-s5p6442/include/mach/timex.h
2 *
3 * Copyright (c) 2003-2010 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S5P6442 - time parameters
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_TIMEX_H
14#define __ASM_ARCH_TIMEX_H
15
16/* CLOCK_TICK_RATE needs to be evaluatable by the cpp, so making it
17 * a variable is useless. It seems as long as we make our timers an
18 * exact multiple of HZ, any value that makes a 1->1 correspondence
19 * for the time conversion functions to/from jiffies is acceptable.
20*/
21
22#define CLOCK_TICK_RATE 12000000
23
24#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/uncompress.h b/arch/arm/mach-s5p6442/include/mach/uncompress.h
deleted file mode 100644
index 5ac7cbeeb987..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/uncompress.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/uncompress.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * S5P6442 - uncompress code
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_UNCOMPRESS_H
14#define __ASM_ARCH_UNCOMPRESS_H
15
16#include <mach/map.h>
17#include <plat/uncompress.h>
18
19static void arch_detect_cpu(void)
20{
21 /* we do not need to do any cpu detection here at the moment. */
22}
23
24#endif /* __ASM_ARCH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/vmalloc.h b/arch/arm/mach-s5p6442/include/mach/vmalloc.h
deleted file mode 100644
index 4aa55e55ac47..000000000000
--- a/arch/arm/mach-s5p6442/include/mach/vmalloc.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/* arch/arm/mach-s5p6442/include/mach/vmalloc.h
2 *
3 * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * S5P6442 vmalloc definition
10*/
11
12#ifndef __ASM_ARCH_VMALLOC_H
13#define __ASM_ARCH_VMALLOC_H
14
15#define VMALLOC_END 0xF6000000UL
16
17#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p6442/init.c b/arch/arm/mach-s5p6442/init.c
deleted file mode 100644
index 1874bdb71e1d..000000000000
--- a/arch/arm/mach-s5p6442/init.c
+++ /dev/null
@@ -1,44 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/s5p6442-init.c
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9*/
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/init.h>
14#include <linux/serial_core.h>
15
16#include <plat/cpu.h>
17#include <plat/devs.h>
18#include <plat/s5p6442.h>
19#include <plat/regs-serial.h>
20
21static struct s3c24xx_uart_clksrc s5p6442_serial_clocks[] = {
22 [0] = {
23 .name = "pclk",
24 .divisor = 1,
25 .min_baud = 0,
26 .max_baud = 0,
27 },
28};
29
30/* uart registration process */
31void __init s5p6442_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
32{
33 struct s3c2410_uartcfg *tcfg = cfg;
34 u32 ucnt;
35
36 for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
37 if (!tcfg->clocks) {
38 tcfg->clocks = s5p6442_serial_clocks;
39 tcfg->clocks_size = ARRAY_SIZE(s5p6442_serial_clocks);
40 }
41 }
42
43 s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
44}
diff --git a/arch/arm/mach-s5p6442/mach-smdk6442.c b/arch/arm/mach-s5p6442/mach-smdk6442.c
deleted file mode 100644
index eaf6b9c489ff..000000000000
--- a/arch/arm/mach-s5p6442/mach-smdk6442.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/mach-smdk6442.c
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9*/
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/init.h>
14#include <linux/serial_core.h>
15#include <linux/i2c.h>
16
17#include <asm/mach/arch.h>
18#include <asm/mach/map.h>
19#include <asm/setup.h>
20#include <asm/mach-types.h>
21
22#include <mach/map.h>
23#include <mach/regs-clock.h>
24
25#include <plat/regs-serial.h>
26#include <plat/s5p6442.h>
27#include <plat/devs.h>
28#include <plat/cpu.h>
29#include <plat/iic.h>
30
31/* Following are default values for UCON, ULCON and UFCON UART registers */
32#define SMDK6442_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
33 S3C2410_UCON_RXILEVEL | \
34 S3C2410_UCON_TXIRQMODE | \
35 S3C2410_UCON_RXIRQMODE | \
36 S3C2410_UCON_RXFIFO_TOI | \
37 S3C2443_UCON_RXERR_IRQEN)
38
39#define SMDK6442_ULCON_DEFAULT S3C2410_LCON_CS8
40
41#define SMDK6442_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
42 S5PV210_UFCON_TXTRIG4 | \
43 S5PV210_UFCON_RXTRIG4)
44
45static struct s3c2410_uartcfg smdk6442_uartcfgs[] __initdata = {
46 [0] = {
47 .hwport = 0,
48 .flags = 0,
49 .ucon = SMDK6442_UCON_DEFAULT,
50 .ulcon = SMDK6442_ULCON_DEFAULT,
51 .ufcon = SMDK6442_UFCON_DEFAULT,
52 },
53 [1] = {
54 .hwport = 1,
55 .flags = 0,
56 .ucon = SMDK6442_UCON_DEFAULT,
57 .ulcon = SMDK6442_ULCON_DEFAULT,
58 .ufcon = SMDK6442_UFCON_DEFAULT,
59 },
60 [2] = {
61 .hwport = 2,
62 .flags = 0,
63 .ucon = SMDK6442_UCON_DEFAULT,
64 .ulcon = SMDK6442_ULCON_DEFAULT,
65 .ufcon = SMDK6442_UFCON_DEFAULT,
66 },
67};
68
69static struct platform_device *smdk6442_devices[] __initdata = {
70 &s3c_device_i2c0,
71 &samsung_asoc_dma,
72 &s5p6442_device_iis0,
73 &s3c_device_wdt,
74};
75
76static struct i2c_board_info smdk6442_i2c_devs0[] __initdata = {
77 { I2C_BOARD_INFO("wm8580", 0x1b), },
78};
79
80static void __init smdk6442_map_io(void)
81{
82 s5p_init_io(NULL, 0, S5P_VA_CHIPID);
83 s3c24xx_init_clocks(12000000);
84 s3c24xx_init_uarts(smdk6442_uartcfgs, ARRAY_SIZE(smdk6442_uartcfgs));
85}
86
87static void __init smdk6442_machine_init(void)
88{
89 s3c_i2c0_set_platdata(NULL);
90 i2c_register_board_info(0, smdk6442_i2c_devs0,
91 ARRAY_SIZE(smdk6442_i2c_devs0));
92 platform_add_devices(smdk6442_devices, ARRAY_SIZE(smdk6442_devices));
93}
94
95MACHINE_START(SMDK6442, "SMDK6442")
96 /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
97 .boot_params = S5P_PA_SDRAM + 0x100,
98 .init_irq = s5p6442_init_irq,
99 .map_io = smdk6442_map_io,
100 .init_machine = smdk6442_machine_init,
101 .timer = &s3c24xx_timer,
102MACHINE_END
diff --git a/arch/arm/mach-s5p6442/setup-i2c0.c b/arch/arm/mach-s5p6442/setup-i2c0.c
deleted file mode 100644
index aad85656b0cc..000000000000
--- a/arch/arm/mach-s5p6442/setup-i2c0.c
+++ /dev/null
@@ -1,28 +0,0 @@
1/* linux/arch/arm/mach-s5p6442/setup-i2c0.c
2 *
3 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * I2C0 GPIO configuration.
7 *
8 * Based on plat-s3c64xx/setup-i2c0.c
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/gpio.h>
18
19struct platform_device; /* don't need the contents */
20
21#include <plat/gpio-cfg.h>
22#include <plat/iic.h>
23
24void s3c_i2c0_cfg_gpio(struct platform_device *dev)
25{
26 s3c_gpio_cfgall_range(S5P6442_GPD1(0), 2,
27 S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
28}
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index bf0b02414e5b..7c6cb4fa47a9 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -99,8 +99,11 @@ static void sdi0_configure(void)
 	gpio_direction_output(sdi0_vsel, 0);
 	gpio_direction_output(sdi0_en, 1);
 
-	/* Add the device */
-	db8500_add_sdi0(&mop500_sdi0_data);
+	/* Add the device, force v2 to subrevision 1 */
+	if (cpu_is_u8500v2())
+		db8500_add_sdi0(&mop500_sdi0_data, 0x10480180);
+	else
+		db8500_add_sdi0(&mop500_sdi0_data, 0);
 }
 
 void mop500_sdi_tc35892_init(void)
@@ -188,13 +191,18 @@ static struct mmci_platform_data mop500_sdi4_data = {
 
 void __init mop500_sdi_init(void)
 {
+	u32 periphid = 0;
+
+	/* v2 has a new version of this block that needs to be forced */
+	if (cpu_is_u8500v2())
+		periphid = 0x10480180;
 	/* PoP:ed eMMC on top of DB8500 v1.0 has problems with high speed */
 	if (!cpu_is_u8500v10())
 		mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED;
-	db8500_add_sdi2(&mop500_sdi2_data);
+	db8500_add_sdi2(&mop500_sdi2_data, periphid);
 
 	/* On-board eMMC */
-	db8500_add_sdi4(&mop500_sdi4_data);
+	db8500_add_sdi4(&mop500_sdi4_data, periphid);
 
 	if (machine_is_hrefv60()) {
 		mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO;
diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h
index c719b5a1d913..7825705033bf 100644
--- a/arch/arm/mach-ux500/devices-common.h
+++ b/arch/arm/mach-ux500/devices-common.h
@@ -28,18 +28,20 @@ dbx500_add_msp_spi(const char *name, resource_size_t base, int irq,
 
 static inline struct amba_device *
 dbx500_add_spi(const char *name, resource_size_t base, int irq,
-	       struct spi_master_cntlr *pdata)
+	       struct spi_master_cntlr *pdata,
+	       u32 periphid)
 {
-	return dbx500_add_amba_device(name, base, irq, pdata, 0);
+	return dbx500_add_amba_device(name, base, irq, pdata, periphid);
 }
 
 struct mmci_platform_data;
 
 static inline struct amba_device *
 dbx500_add_sdi(const char *name, resource_size_t base, int irq,
-	       struct mmci_platform_data *pdata)
+	       struct mmci_platform_data *pdata,
+	       u32 periphid)
 {
-	return dbx500_add_amba_device(name, base, irq, pdata, 0);
+	return dbx500_add_amba_device(name, base, irq, pdata, periphid);
 }
 
 struct amba_pl011_data;
diff --git a/arch/arm/mach-ux500/devices-db5500.h b/arch/arm/mach-ux500/devices-db5500.h
index 94627f7783b0..0c4bccd02b90 100644
--- a/arch/arm/mach-ux500/devices-db5500.h
+++ b/arch/arm/mach-ux500/devices-db5500.h
@@ -38,24 +38,34 @@
 	ux500_add_usb(U5500_USBOTG_BASE, IRQ_DB5500_USBOTG, rx_cfg, tx_cfg)
 
 #define db5500_add_sdi0(pdata) \
-	dbx500_add_sdi("sdi0", U5500_SDI0_BASE, IRQ_DB5500_SDMMC0, pdata)
+	dbx500_add_sdi("sdi0", U5500_SDI0_BASE, IRQ_DB5500_SDMMC0, pdata, \
+		       0x10480180)
 #define db5500_add_sdi1(pdata) \
-	dbx500_add_sdi("sdi1", U5500_SDI1_BASE, IRQ_DB5500_SDMMC1, pdata)
+	dbx500_add_sdi("sdi1", U5500_SDI1_BASE, IRQ_DB5500_SDMMC1, pdata, \
+		       0x10480180)
 #define db5500_add_sdi2(pdata) \
-	dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata)
+	dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata, \
+		       0x10480180)
 #define db5500_add_sdi3(pdata) \
-	dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata)
+	dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata, \
+		       0x10480180)
 #define db5500_add_sdi4(pdata) \
-	dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata)
+	dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata, \
+		       0x10480180)
 
+/* This one has a bad peripheral ID in the U5500 silicon */
 #define db5500_add_spi0(pdata) \
-	dbx500_add_spi("spi0", U5500_SPI0_BASE, IRQ_DB5500_SPI0, pdata)
+	dbx500_add_spi("spi0", U5500_SPI0_BASE, IRQ_DB5500_SPI0, pdata, \
+		       0x10080023)
 #define db5500_add_spi1(pdata) \
-	dbx500_add_spi("spi1", U5500_SPI1_BASE, IRQ_DB5500_SPI1, pdata)
+	dbx500_add_spi("spi1", U5500_SPI1_BASE, IRQ_DB5500_SPI1, pdata, \
+		       0x10080023)
 #define db5500_add_spi2(pdata) \
-	dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata)
+	dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata, \
+		       0x10080023)
 #define db5500_add_spi3(pdata) \
-	dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata)
+	dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata, \
+		       0x10080023)
 
 #define db5500_add_uart0(plat) \
 	dbx500_add_uart("uart0", U5500_UART0_BASE, IRQ_DB5500_UART0, plat)
diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h
index 9cc6f8f5d3e6..cbd4a9ae8109 100644
--- a/arch/arm/mach-ux500/devices-db8500.h
+++ b/arch/arm/mach-ux500/devices-db8500.h
@@ -25,7 +25,7 @@ static inline struct amba_device *
 db8500_add_ssp(const char *name, resource_size_t base, int irq,
 	       struct pl022_ssp_controller *pdata)
 {
-	return dbx500_add_amba_device(name, base, irq, pdata, SSP_PER_ID);
+	return dbx500_add_amba_device(name, base, irq, pdata, 0);
 }
 
 
@@ -64,18 +64,18 @@ db8500_add_ssp(const char *name, resource_size_t base, int irq,
 #define db8500_add_usb(rx_cfg, tx_cfg) \
 	ux500_add_usb(U8500_USBOTG_BASE, IRQ_DB8500_USBOTG, rx_cfg, tx_cfg)
 
-#define db8500_add_sdi0(pdata) \
-	dbx500_add_sdi("sdi0", U8500_SDI0_BASE, IRQ_DB8500_SDMMC0, pdata)
-#define db8500_add_sdi1(pdata) \
-	dbx500_add_sdi("sdi1", U8500_SDI1_BASE, IRQ_DB8500_SDMMC1, pdata)
-#define db8500_add_sdi2(pdata) \
-	dbx500_add_sdi("sdi2", U8500_SDI2_BASE, IRQ_DB8500_SDMMC2, pdata)
-#define db8500_add_sdi3(pdata) \
-	dbx500_add_sdi("sdi3", U8500_SDI3_BASE, IRQ_DB8500_SDMMC3, pdata)
-#define db8500_add_sdi4(pdata) \
-	dbx500_add_sdi("sdi4", U8500_SDI4_BASE, IRQ_DB8500_SDMMC4, pdata)
-#define db8500_add_sdi5(pdata) \
-	dbx500_add_sdi("sdi5", U8500_SDI5_BASE, IRQ_DB8500_SDMMC5, pdata)
+#define db8500_add_sdi0(pdata, pid) \
+	dbx500_add_sdi("sdi0", U8500_SDI0_BASE, IRQ_DB8500_SDMMC0, pdata, pid)
+#define db8500_add_sdi1(pdata, pid) \
+	dbx500_add_sdi("sdi1", U8500_SDI1_BASE, IRQ_DB8500_SDMMC1, pdata, pid)
+#define db8500_add_sdi2(pdata, pid) \
+	dbx500_add_sdi("sdi2", U8500_SDI2_BASE, IRQ_DB8500_SDMMC2, pdata, pid)
+#define db8500_add_sdi3(pdata, pid) \
+	dbx500_add_sdi("sdi3", U8500_SDI3_BASE, IRQ_DB8500_SDMMC3, pdata, pid)
+#define db8500_add_sdi4(pdata, pid) \
+	dbx500_add_sdi("sdi4", U8500_SDI4_BASE, IRQ_DB8500_SDMMC4, pdata, pid)
+#define db8500_add_sdi5(pdata, pid) \
+	dbx500_add_sdi("sdi5", U8500_SDI5_BASE, IRQ_DB8500_SDMMC5, pdata, pid)
 
 #define db8500_add_ssp0(pdata) \
 	db8500_add_ssp("ssp0", U8500_SSP0_BASE, IRQ_DB8500_SSP0, pdata)
@@ -83,13 +83,13 @@ db8500_add_ssp(const char *name, resource_size_t base, int irq,
83 db8500_add_ssp("ssp1", U8500_SSP1_BASE, IRQ_DB8500_SSP1, pdata) 83 db8500_add_ssp("ssp1", U8500_SSP1_BASE, IRQ_DB8500_SSP1, pdata)
84 84
85#define db8500_add_spi0(pdata) \ 85#define db8500_add_spi0(pdata) \
86 dbx500_add_spi("spi0", U8500_SPI0_BASE, IRQ_DB8500_SPI0, pdata) 86 dbx500_add_spi("spi0", U8500_SPI0_BASE, IRQ_DB8500_SPI0, pdata, 0)
87#define db8500_add_spi1(pdata) \ 87#define db8500_add_spi1(pdata) \
88 dbx500_add_spi("spi1", U8500_SPI1_BASE, IRQ_DB8500_SPI1, pdata) 88 dbx500_add_spi("spi1", U8500_SPI1_BASE, IRQ_DB8500_SPI1, pdata, 0)
89#define db8500_add_spi2(pdata) \ 89#define db8500_add_spi2(pdata) \
90 dbx500_add_spi("spi2", U8500_SPI2_BASE, IRQ_DB8500_SPI2, pdata) 90 dbx500_add_spi("spi2", U8500_SPI2_BASE, IRQ_DB8500_SPI2, pdata, 0)
91#define db8500_add_spi3(pdata) \ 91#define db8500_add_spi3(pdata) \
92 dbx500_add_spi("spi3", U8500_SPI3_BASE, IRQ_DB8500_SPI3, pdata) 92 dbx500_add_spi("spi3", U8500_SPI3_BASE, IRQ_DB8500_SPI3, pdata, 0)
93 93
94#define db8500_add_uart0(pdata) \ 94#define db8500_add_uart0(pdata) \
95 dbx500_add_uart("uart0", U8500_UART0_BASE, IRQ_DB8500_UART0, pdata) 95 dbx500_add_uart("uart0", U8500_UART0_BASE, IRQ_DB8500_UART0, pdata)
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 2c6f71049f2e..470ac52663d6 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -29,9 +29,6 @@
29#include <mach/db8500-regs.h> 29#include <mach/db8500-regs.h>
30#include <mach/db5500-regs.h> 30#include <mach/db5500-regs.h>
31 31
32/* ST-Ericsson modified pl022 id */
33#define SSP_PER_ID 0x01080022
34
35#ifndef __ASSEMBLY__ 32#ifndef __ASSEMBLY__
36 33
37#include <mach/id.h> 34#include <mach/id.h>
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index c96fa1b3f49f..73b4a8b66a57 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -176,6 +176,7 @@ ENDPROC(v6_coherent_kern_range)
176 */ 176 */
177ENTRY(v6_flush_kern_dcache_area) 177ENTRY(v6_flush_kern_dcache_area)
178 add r1, r0, r1 178 add r1, r0, r1
179 bic r0, r0, #D_CACHE_LINE_SIZE - 1
1791: 1801:
180#ifdef HARVARD_CACHE 181#ifdef HARVARD_CACHE
181 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line 182 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index dc18d81ef8ce..d32f02b61866 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -221,6 +221,8 @@ ENDPROC(v7_coherent_user_range)
221ENTRY(v7_flush_kern_dcache_area) 221ENTRY(v7_flush_kern_dcache_area)
222 dcache_line_size r2, r3 222 dcache_line_size r2, r3
223 add r1, r0, r1 223 add r1, r0, r1
224 sub r3, r2, #1
225 bic r0, r0, r3
2241: 2261:
225 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line 227 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
226 add r0, r0, r2 228 add r0, r0, r2
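
The two cache hunks above round the start address down to a D-cache line boundary before entering the clean-and-invalidate loop, so a region that does not begin on a line boundary still gets its first line maintained; the v6 version masks with the build-time D_CACHE_LINE_SIZE, the v7 version with the run-time size obtained from dcache_line_size. The round-down is ordinary power-of-two masking; the small stand-alone C program below shows the same arithmetic with an arbitrarily chosen 32-byte line.

    #include <stdio.h>

    int main(void)
    {
            unsigned long line  = 32;        /* assumed D-cache line size */
            unsigned long start = 0x1234;    /* unaligned buffer start */
            unsigned long size  = 0x100;
            unsigned long end   = start + size;

            /* Equivalent of "bic r0, r0, #LINE - 1": clear the low bits. */
            unsigned long aligned = start & ~(line - 1);

            for (unsigned long addr = aligned; addr < end; addr += line)
                    ;       /* the kernel issues one clean+invalidate per line here */

            printf("maintenance runs from %#lx (not %#lx) up to %#lx\n",
                   aligned, start, end);
            return 0;
    }
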
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba3cfab..8bfae964b133 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -24,9 +24,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
24 24
25/* 25/*
26 * We fork()ed a process, and we need a new context for the child 26 * We fork()ed a process, and we need a new context for the child
27 * to run in. We reserve version 0 for initial tasks so we will 27 * to run in.
28 * always allocate an ASID. The ASID 0 is reserved for the TTBR
29 * register changing sequence.
30 */ 28 */
31void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) 29void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
32{ 30{
@@ -36,8 +34,11 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
36 34
37static void flush_context(void) 35static void flush_context(void)
38{ 36{
39 /* set the reserved ASID before flushing the TLB */ 37 u32 ttb;
40 asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); 38 /* Copy TTBR1 into TTBR0 */
39 asm volatile("mrc p15, 0, %0, c2, c0, 1\n"
40 "mcr p15, 0, %0, c2, c0, 0"
41 : "=r" (ttb));
41 isb(); 42 isb();
42 local_flush_tlb_all(); 43 local_flush_tlb_all();
43 if (icache_is_vivt_asid_tagged()) { 44 if (icache_is_vivt_asid_tagged()) {
@@ -93,7 +94,7 @@ static void reset_context(void *info)
93 return; 94 return;
94 95
95 smp_rmb(); 96 smp_rmb();
96 asid = cpu_last_asid + cpu + 1; 97 asid = cpu_last_asid + cpu;
97 98
98 flush_context(); 99 flush_context();
99 set_mm_context(mm, asid); 100 set_mm_context(mm, asid);
@@ -143,13 +144,13 @@ void __new_context(struct mm_struct *mm)
143 * to start a new version and flush the TLB. 144 * to start a new version and flush the TLB.
144 */ 145 */
145 if (unlikely((asid & ~ASID_MASK) == 0)) { 146 if (unlikely((asid & ~ASID_MASK) == 0)) {
146 asid = cpu_last_asid + smp_processor_id() + 1; 147 asid = cpu_last_asid + smp_processor_id();
147 flush_context(); 148 flush_context();
148#ifdef CONFIG_SMP 149#ifdef CONFIG_SMP
149 smp_wmb(); 150 smp_wmb();
150 smp_call_function(reset_context, NULL, 1); 151 smp_call_function(reset_context, NULL, 1);
151#endif 152#endif
152 cpu_last_asid += NR_CPUS; 153 cpu_last_asid += NR_CPUS - 1;
153 } 154 }
154 155
155 set_mm_context(mm, asid); 156 set_mm_context(mm, asid);
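
The context.c hunk drops the reserved-ASID-0 scheme: during a rollover the MMU is parked on the TTBR1 tables (flush_context() copies TTBR1 into TTBR0) instead of on a reserved context ID, so the per-CPU "+ 1" offsets go away and each generation advances cpu_last_asid by NR_CPUS - 1. The stand-alone sketch below only models the generation/ASID arithmetic with an assumed 8-bit ASID field; it leaves out the TLB flushing and the SMP hand-shaking the real code performs.

    #include <stdio.h>

    #define ASID_BITS       8
    #define ASID_MASK       (~0UL << ASID_BITS)     /* upper bits = generation */
    #define NR_CPUS         4

    static unsigned long cpu_last_asid = 1UL << ASID_BITS;  /* first generation */

    /* Hand out the next ASID; when the low bits wrap to zero, start a new
     * generation (the real kernel also flushes the TLBs at that point). */
    static unsigned long new_asid(int cpu)
    {
            unsigned long asid = ++cpu_last_asid;

            if ((asid & ~ASID_MASK) == 0) {
                    asid = cpu_last_asid + cpu;     /* per-CPU slot, no +1 now */
                    cpu_last_asid += NR_CPUS - 1;
            }
            return asid;
    }

    int main(void)
    {
            for (int i = 0; i < 260; i++) {
                    unsigned long a = new_asid(0);
                    if (i > 250)
                            printf("alloc %d -> generation %lu, asid %lu\n",
                                   i, a >> ASID_BITS, a & ~ASID_MASK);
            }
            return 0;
    }
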
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3f17ea146f0e..2c2cce9cd8c8 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,12 +15,14 @@
15#include <linux/mman.h> 15#include <linux/mman.h>
16#include <linux/nodemask.h> 16#include <linux/nodemask.h>
17#include <linux/initrd.h> 17#include <linux/initrd.h>
18#include <linux/of_fdt.h>
18#include <linux/highmem.h> 19#include <linux/highmem.h>
19#include <linux/gfp.h> 20#include <linux/gfp.h>
20#include <linux/memblock.h> 21#include <linux/memblock.h>
21#include <linux/sort.h> 22#include <linux/sort.h>
22 23
23#include <asm/mach-types.h> 24#include <asm/mach-types.h>
25#include <asm/prom.h>
24#include <asm/sections.h> 26#include <asm/sections.h>
25#include <asm/setup.h> 27#include <asm/setup.h>
26#include <asm/sizes.h> 28#include <asm/sizes.h>
@@ -71,6 +73,14 @@ static int __init parse_tag_initrd2(const struct tag *tag)
71 73
72__tagtable(ATAG_INITRD2, parse_tag_initrd2); 74__tagtable(ATAG_INITRD2, parse_tag_initrd2);
73 75
76#ifdef CONFIG_OF_FLATTREE
77void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
78{
79 phys_initrd_start = start;
80 phys_initrd_size = end - start;
81}
82#endif /* CONFIG_OF_FLATTREE */
83
74/* 84/*
75 * This keeps memory configuration data used by a couple memory 85 * This keeps memory configuration data used by a couple memory
76 * initialization functions, as well as show_mem() for the skipping 86 * initialization functions, as well as show_mem() for the skipping
@@ -273,13 +283,15 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
273 free_area_init_node(0, zone_size, min, zhole_size); 283 free_area_init_node(0, zone_size, min, zhole_size);
274} 284}
275 285
276#ifndef CONFIG_SPARSEMEM 286#ifdef CONFIG_HAVE_ARCH_PFN_VALID
277int pfn_valid(unsigned long pfn) 287int pfn_valid(unsigned long pfn)
278{ 288{
279 return memblock_is_memory(pfn << PAGE_SHIFT); 289 return memblock_is_memory(pfn << PAGE_SHIFT);
280} 290}
281EXPORT_SYMBOL(pfn_valid); 291EXPORT_SYMBOL(pfn_valid);
292#endif
282 293
294#ifndef CONFIG_SPARSEMEM
283static void arm_memory_present(void) 295static void arm_memory_present(void)
284{ 296{
285} 297}
@@ -334,6 +346,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
334#endif 346#endif
335 347
336 arm_mm_memblock_reserve(); 348 arm_mm_memblock_reserve();
349 arm_dt_memblock_reserve();
337 350
338 /* reserve any platform specific memblock areas */ 351 /* reserve any platform specific memblock areas */
339 if (mdesc->reserve) 352 if (mdesc->reserve)
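
The init.c hunk hooks ARM into the flattened device tree code: early_init_dt_setup_initrd_arch() is the arch callback the generic FDT parser uses to hand over the initrd range, and arm_dt_memblock_reserve() (declared in the new asm/prom.h) keeps the blob itself out of the allocator. The stand-alone sketch below only illustrates the callback pattern; the "generic" side and the addresses in it are made up.

    #include <stdio.h>

    /* Values the arch side records, mirroring phys_initrd_start/size. */
    static unsigned long phys_initrd_start;
    static unsigned long phys_initrd_size;

    /* Arch hook, shaped like early_init_dt_setup_initrd_arch(): the generic
     * parser calls it once it has located the initrd properties. */
    static void setup_initrd_arch(unsigned long start, unsigned long end)
    {
            phys_initrd_start = start;
            phys_initrd_size  = end - start;
    }

    /* Stand-in for the FDT scan that finds linux,initrd-{start,end}. */
    static void fdt_scan(void)
    {
            unsigned long start = 0x81000000, end = 0x81400000;  /* made-up */
            setup_initrd_arch(start, end);
    }

    int main(void)
    {
            fdt_scan();
            printf("initrd at %#lx, %lu bytes\n",
                   phys_initrd_start, phys_initrd_size);
            return 0;
    }
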
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d2384106af9c..5b3d7d543659 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -5,14 +5,9 @@ extern pmd_t *top_pmd;
5 5
6#define TOP_PTE(x) pte_offset_kernel(top_pmd, x) 6#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
7 7
8static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
9{
10 return pmd_offset(pud_offset(pgd, virt), virt);
11}
12
13static inline pmd_t *pmd_off_k(unsigned long virt) 8static inline pmd_t *pmd_off_k(unsigned long virt)
14{ 9{
15 return pmd_off(pgd_offset_k(virt), virt); 10 return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
16} 11}
17 12
18struct mem_type { 13struct mem_type {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 08a92368d9d3..9d9e736c2b4f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -763,15 +763,12 @@ static void __init sanity_check_meminfo(void)
763{ 763{
764 int i, j, highmem = 0; 764 int i, j, highmem = 0;
765 765
766 lowmem_limit = __pa(vmalloc_min - 1) + 1;
767 memblock_set_current_limit(lowmem_limit);
768
769 for (i = 0, j = 0; i < meminfo.nr_banks; i++) { 766 for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
770 struct membank *bank = &meminfo.bank[j]; 767 struct membank *bank = &meminfo.bank[j];
771 *bank = meminfo.bank[i]; 768 *bank = meminfo.bank[i];
772 769
773#ifdef CONFIG_HIGHMEM 770#ifdef CONFIG_HIGHMEM
774 if (__va(bank->start) > vmalloc_min || 771 if (__va(bank->start) >= vmalloc_min ||
775 __va(bank->start) < (void *)PAGE_OFFSET) 772 __va(bank->start) < (void *)PAGE_OFFSET)
776 highmem = 1; 773 highmem = 1;
777 774
@@ -829,6 +826,9 @@ static void __init sanity_check_meminfo(void)
829 bank->size = newsize; 826 bank->size = newsize;
830 } 827 }
831#endif 828#endif
829 if (!bank->highmem && bank->start + bank->size > lowmem_limit)
830 lowmem_limit = bank->start + bank->size;
831
832 j++; 832 j++;
833 } 833 }
834#ifdef CONFIG_HIGHMEM 834#ifdef CONFIG_HIGHMEM
@@ -852,6 +852,7 @@ static void __init sanity_check_meminfo(void)
852 } 852 }
853#endif 853#endif
854 meminfo.nr_banks = j; 854 meminfo.nr_banks = j;
855 memblock_set_current_limit(lowmem_limit);
855} 856}
856 857
857static inline void prepare_page_table(void) 858static inline void prepare_page_table(void)
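
In the mmu.c hunk, sanity_check_meminfo() stops precomputing lowmem_limit from vmalloc_min and instead raises it to the top of the highest non-highmem bank it keeps, telling memblock only after the bank list has been trimmed. The stand-alone program below mirrors just that accumulation over a made-up bank layout.

    #include <stdio.h>

    struct bank {
            unsigned long start;
            unsigned long size;
            int highmem;
    };

    int main(void)
    {
            /* Hypothetical memory layout after highmem splitting. */
            struct bank banks[] = {
                    { 0x80000000UL, 0x10000000UL, 0 },
                    { 0x90000000UL, 0x08000000UL, 0 },
                    { 0x98000000UL, 0x20000000UL, 1 },      /* highmem bank */
            };
            unsigned long lowmem_limit = 0;

            for (unsigned i = 0; i < sizeof(banks) / sizeof(banks[0]); i++) {
                    struct bank *b = &banks[i];

                    /* Mirror of the added hunk: only lowmem banks move the limit. */
                    if (!b->highmem && b->start + b->size > lowmem_limit)
                            lowmem_limit = b->start + b->size;
            }

            /* memblock_set_current_limit(lowmem_limit) happens once, at the end. */
            printf("lowmem limit: %#lx\n", lowmem_limit);
            return 0;
    }
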
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index ab17cc0d3fa7..1d2b8451bf25 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -213,7 +213,9 @@ __v6_setup:
213 mcr p15, 0, r0, c2, c0, 2 @ TTB control register 213 mcr p15, 0, r0, c2, c0, 2 @ TTB control register
214 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) 214 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
215 ALT_UP(orr r4, r4, #TTB_FLAGS_UP) 215 ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
216 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 216 ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP)
217 ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
218 mcr p15, 0, r8, c2, c0, 1 @ load TTB1
217#endif /* CONFIG_MMU */ 219#endif /* CONFIG_MMU */
218 adr r5, v6_crval 220 adr r5, v6_crval
219 ldmia r5, {r5, r6} 221 ldmia r5, {r5, r6}
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index babfba09c89f..b3b566ec83d3 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -108,18 +108,16 @@ ENTRY(cpu_v7_switch_mm)
108#ifdef CONFIG_ARM_ERRATA_430973 108#ifdef CONFIG_ARM_ERRATA_430973
109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB 109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
110#endif 110#endif
111#ifdef CONFIG_ARM_ERRATA_754322 111 mrc p15, 0, r2, c2, c0, 1 @ load TTB 1
112 dsb 112 mcr p15, 0, r2, c2, c0, 0 @ into TTB 0
113#endif
114 mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
115 isb
1161: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
117 isb 113 isb
118#ifdef CONFIG_ARM_ERRATA_754322 114#ifdef CONFIG_ARM_ERRATA_754322
119 dsb 115 dsb
120#endif 116#endif
121 mcr p15, 0, r1, c13, c0, 1 @ set context ID 117 mcr p15, 0, r1, c13, c0, 1 @ set context ID
122 isb 118 isb
119 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
120 isb
123#endif 121#endif
124 mov pc, lr 122 mov pc, lr
125ENDPROC(cpu_v7_switch_mm) 123ENDPROC(cpu_v7_switch_mm)
@@ -368,7 +366,9 @@ __v7_setup:
368 mcr p15, 0, r10, c2, c0, 2 @ TTB control register 366 mcr p15, 0, r10, c2, c0, 2 @ TTB control register
369 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) 367 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
370 ALT_UP(orr r4, r4, #TTB_FLAGS_UP) 368 ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
371 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 369 ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP)
370 ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
371 mcr p15, 0, r8, c2, c0, 1 @ load TTB1
372 ldr r5, =PRRR @ PRRR 372 ldr r5, =PRRR @ PRRR
373 ldr r6, =NMRR @ NMRR 373 ldr r6, =NMRR @ NMRR
374 mcr p15, 0, r5, c10, c2, 0 @ write PRRR 374 mcr p15, 0, r5, c10, c2, 0 @ write PRRR
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 6751bcf7b888..e98f5c5c7879 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -7,7 +7,7 @@
7 7
8config PLAT_S5P 8config PLAT_S5P
9 bool 9 bool
10 depends on (ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4) 10 depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4)
11 default y 11 default y
12 select ARM_VIC if !ARCH_EXYNOS4 12 select ARM_VIC if !ARCH_EXYNOS4
13 select ARM_GIC if ARCH_EXYNOS4 13 select ARM_GIC if ARCH_EXYNOS4
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c
index 5cf5e721e6ca..bbc2aa7449ca 100644
--- a/arch/arm/plat-s5p/cpu.c
+++ b/arch/arm/plat-s5p/cpu.c
@@ -21,7 +21,6 @@
21 21
22#include <plat/cpu.h> 22#include <plat/cpu.h>
23#include <plat/s5p6440.h> 23#include <plat/s5p6440.h>
24#include <plat/s5p6442.h>
25#include <plat/s5p6450.h> 24#include <plat/s5p6450.h>
26#include <plat/s5pc100.h> 25#include <plat/s5pc100.h>
27#include <plat/s5pv210.h> 26#include <plat/s5pv210.h>
@@ -30,7 +29,6 @@
30/* table of supported CPUs */ 29/* table of supported CPUs */
31 30
32static const char name_s5p6440[] = "S5P6440"; 31static const char name_s5p6440[] = "S5P6440";
33static const char name_s5p6442[] = "S5P6442";
34static const char name_s5p6450[] = "S5P6450"; 32static const char name_s5p6450[] = "S5P6450";
35static const char name_s5pc100[] = "S5PC100"; 33static const char name_s5pc100[] = "S5PC100";
36static const char name_s5pv210[] = "S5PV210/S5PC110"; 34static const char name_s5pv210[] = "S5PV210/S5PC110";
@@ -46,14 +44,6 @@ static struct cpu_table cpu_ids[] __initdata = {
46 .init = s5p64x0_init, 44 .init = s5p64x0_init,
47 .name = name_s5p6440, 45 .name = name_s5p6440,
48 }, { 46 }, {
49 .idcode = 0x36442000,
50 .idmask = 0xfffff000,
51 .map_io = s5p6442_map_io,
52 .init_clocks = s5p6442_init_clocks,
53 .init_uarts = s5p6442_init_uarts,
54 .init = s5p6442_init,
55 .name = name_s5p6442,
56 }, {
57 .idcode = 0x36450000, 47 .idcode = 0x36450000,
58 .idmask = 0xfffff000, 48 .idmask = 0xfffff000,
59 .map_io = s5p6450_map_io, 49 .map_io = s5p6450_map_io,
diff --git a/arch/arm/plat-s5p/include/plat/s5p6442.h b/arch/arm/plat-s5p/include/plat/s5p6442.h
deleted file mode 100644
index 7b8801349c94..000000000000
--- a/arch/arm/plat-s5p/include/plat/s5p6442.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/* arch/arm/plat-s5p/include/plat/s5p6442.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Header file for s5p6442 cpu support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13/* Common init code for S5P6442 related SoCs */
14
15extern void s5p6442_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
16extern void s5p6442_register_clocks(void);
17extern void s5p6442_setup_clocks(void);
18
19#ifdef CONFIG_CPU_S5P6442
20
21extern int s5p6442_init(void);
22extern void s5p6442_init_irq(void);
23extern void s5p6442_map_io(void);
24extern void s5p6442_init_clocks(int xtal);
25
26#define s5p6442_init_uarts s5p6442_common_init_uarts
27
28#else
29#define s5p6442_init_clocks NULL
30#define s5p6442_init_uarts NULL
31#define s5p6442_map_io NULL
32#define s5p6442_init NULL
33#endif
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 3aedac0034ba..c0a5741b23e6 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -86,7 +86,6 @@ extern struct sysdev_class s3c2443_sysclass;
86extern struct sysdev_class s3c6410_sysclass; 86extern struct sysdev_class s3c6410_sysclass;
87extern struct sysdev_class s3c64xx_sysclass; 87extern struct sysdev_class s3c64xx_sysclass;
88extern struct sysdev_class s5p64x0_sysclass; 88extern struct sysdev_class s5p64x0_sysclass;
89extern struct sysdev_class s5p6442_sysclass;
90extern struct sysdev_class s5pv210_sysclass; 89extern struct sysdev_class s5pv210_sysclass;
91extern struct sysdev_class exynos4_sysclass; 90extern struct sysdev_class exynos4_sysclass;
92 91
diff --git a/arch/arm/plat-samsung/include/plat/debug-macro.S b/arch/arm/plat-samsung/include/plat/debug-macro.S
index dc6efd90e8ff..207e275362a8 100644
--- a/arch/arm/plat-samsung/include/plat/debug-macro.S
+++ b/arch/arm/plat-samsung/include/plat/debug-macro.S
@@ -11,7 +11,7 @@
11 11
12#include <plat/regs-serial.h> 12#include <plat/regs-serial.h>
13 13
14/* The S5PV210/S5PC110 and S5P6442 implementations are as belows. */ 14/* The S5PV210/S5PC110 implementations are as belows. */
15 15
16 .macro fifo_level_s5pv210 rd, rx 16 .macro fifo_level_s5pv210 rd, rx
17 ldr \rd, [ \rx, # S3C2410_UFSTAT ] 17 ldr \rd, [ \rx, # S3C2410_UFSTAT ]
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 39818d8da420..b61b8ee7cc52 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -111,12 +111,6 @@ extern struct platform_device exynos4_device_spdif;
111extern struct platform_device exynos4_device_pd[]; 111extern struct platform_device exynos4_device_pd[];
112extern struct platform_device exynos4_device_ahci; 112extern struct platform_device exynos4_device_ahci;
113 113
114extern struct platform_device s5p6442_device_pcm0;
115extern struct platform_device s5p6442_device_pcm1;
116extern struct platform_device s5p6442_device_iis0;
117extern struct platform_device s5p6442_device_iis1;
118extern struct platform_device s5p6442_device_spi;
119
120extern struct platform_device s5p6440_device_pcm; 114extern struct platform_device s5p6440_device_pcm;
121extern struct platform_device s5p6440_device_iis; 115extern struct platform_device s5p6440_device_iis;
122 116
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index 788837e99cb3..c151c5f94a87 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -194,7 +194,7 @@
194#define S3C64XX_UINTSP 0x34 194#define S3C64XX_UINTSP 0x34
195#define S3C64XX_UINTM 0x38 195#define S3C64XX_UINTM 0x38
196 196
197/* Following are specific to S5PV210 and S5P6442 */ 197/* Following are specific to S5PV210 */
198#define S5PV210_UCON_CLKMASK (1<<10) 198#define S5PV210_UCON_CLKMASK (1<<10)
199#define S5PV210_UCON_PCLK (0<<10) 199#define S5PV210_UCON_PCLK (0<<10)
200#define S5PV210_UCON_UCLK (1<<10) 200#define S5PV210_UCON_UCLK (1<<10)
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index ff1a561b326e..0ffe34a21554 100644
--- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -69,6 +69,5 @@ extern void s3c64xx_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
69extern void s5pc100_spi_set_info(int cntrlr, int src_clk_nr, int num_cs); 69extern void s5pc100_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
70extern void s5pv210_spi_set_info(int cntrlr, int src_clk_nr, int num_cs); 70extern void s5pv210_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
71extern void s5p64x0_spi_set_info(int cntrlr, int src_clk_nr, int num_cs); 71extern void s5p64x0_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
72extern void s5p6442_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
73 72
74#endif /* __S3C64XX_PLAT_SPI_H */ 73#endif /* __S3C64XX_PLAT_SPI_H */
diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h
index 72444d97f80c..b70c19bab63a 100644
--- a/arch/avr32/include/asm/bitops.h
+++ b/arch/avr32/include/asm/bitops.h
@@ -270,14 +270,21 @@ static inline int __fls(unsigned long word)
270 270
271unsigned long find_first_zero_bit(const unsigned long *addr, 271unsigned long find_first_zero_bit(const unsigned long *addr,
272 unsigned long size); 272 unsigned long size);
273#define find_first_zero_bit find_first_zero_bit
274
273unsigned long find_next_zero_bit(const unsigned long *addr, 275unsigned long find_next_zero_bit(const unsigned long *addr,
274 unsigned long size, 276 unsigned long size,
275 unsigned long offset); 277 unsigned long offset);
278#define find_next_zero_bit find_next_zero_bit
279
276unsigned long find_first_bit(const unsigned long *addr, 280unsigned long find_first_bit(const unsigned long *addr,
277 unsigned long size); 281 unsigned long size);
282#define find_first_bit find_first_bit
283
278unsigned long find_next_bit(const unsigned long *addr, 284unsigned long find_next_bit(const unsigned long *addr,
279 unsigned long size, 285 unsigned long size,
280 unsigned long offset); 286 unsigned long offset);
287#define find_next_bit find_next_bit
281 288
282/* 289/*
283 * ffs: find first bit set. This is defined the same way as 290 * ffs: find first bit set. This is defined the same way as
@@ -299,6 +306,14 @@ static inline int ffs(unsigned long word)
299#include <asm-generic/bitops/hweight.h> 306#include <asm-generic/bitops/hweight.h>
300#include <asm-generic/bitops/lock.h> 307#include <asm-generic/bitops/lock.h>
301 308
309extern unsigned long find_next_zero_bit_le(const void *addr,
310 unsigned long size, unsigned long offset);
311#define find_next_zero_bit_le find_next_zero_bit_le
312
313extern unsigned long find_next_bit_le(const void *addr,
314 unsigned long size, unsigned long offset);
315#define find_next_bit_le find_next_bit_le
316
302#include <asm-generic/bitops/le.h> 317#include <asm-generic/bitops/le.h>
303#include <asm-generic/bitops/ext2-atomic.h> 318#include <asm-generic/bitops/ext2-atomic.h>
304 319
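
The avr32 bitops hunk adds the self-referential markers (#define find_first_bit find_first_bit and friends) that the asm-generic bitops headers test with #ifndef, so the architecture's own find_* routines are kept and the generic fallbacks stay out; this goes hand in hand with the GENERIC_FIND_NEXT_BIT Kconfig removals elsewhere in this series. The stand-alone C example below demonstrates the convention with an invented single-word helper, not the kernel's (addr, size) interface.

    #include <stdio.h>

    /* --- "arch" side: supplies its own implementation and marks it --- */
    static unsigned long find_first_bit(unsigned long word)
    {
            unsigned long bit = 0;

            while (bit < 8 * sizeof(word) && !(word & (1UL << bit)))
                    bit++;
            return bit;
    }
    #define find_first_bit find_first_bit   /* "already provided" marker */

    /* --- "generic" side: only installs a fallback if the marker is absent --- */
    #ifndef find_first_bit
    /* the generic header would define its fallback implementation here */
    #endif

    int main(void)
    {
            printf("first set bit of 0x50: %lu\n", find_first_bit(0x50));
            return 0;
    }
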
diff --git a/arch/avr32/include/asm/unistd.h b/arch/avr32/include/asm/unistd.h
index 89861a27543e..f714544e5560 100644
--- a/arch/avr32/include/asm/unistd.h
+++ b/arch/avr32/include/asm/unistd.h
@@ -299,9 +299,10 @@
299#define __NR_signalfd 279 299#define __NR_signalfd 279
300/* 280 was __NR_timerfd */ 300/* 280 was __NR_timerfd */
301#define __NR_eventfd 281 301#define __NR_eventfd 281
302#define __NR_setns 283
302 303
303#ifdef __KERNEL__ 304#ifdef __KERNEL__
304#define NR_syscalls 282 305#define NR_syscalls 284
305 306
306/* Old stuff */ 307/* Old stuff */
307#define __IGNORE_uselib 308#define __IGNORE_uselib
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index e76bad16b0f0..c7fd394d28a4 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -296,4 +296,5 @@ sys_call_table:
296 .long sys_ni_syscall /* 280, was sys_timerfd */ 296 .long sys_ni_syscall /* 280, was sys_timerfd */
297 .long sys_eventfd 297 .long sys_eventfd
298 .long sys_recvmmsg 298 .long sys_recvmmsg
299 .long sys_setns
299 .long sys_ni_syscall /* r8 is saturated at nr_syscalls */ 300 .long sys_ni_syscall /* r8 is saturated at nr_syscalls */
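
From here on the same three-step pattern recurs per architecture for the new setns() system call: reserve __NR_setns, bump NR_syscalls, and append sys_setns to the syscall table (avr32 above, then blackfin, cris, frv, h8300, ia64 and m32r in the hunks that follow). For reference, a minimal user-space caller is sketched below; it assumes a glibc that already exposes the setns() wrapper (otherwise syscall(__NR_setns, fd, nstype) would be used directly) and needs suitable privileges plus a valid /proc/<pid>/ns/ file to succeed.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            const char *path = argc > 1 ? argv[1] : "/proc/1/ns/net";
            int fd = open(path, O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Join the namespace referred to by fd; 0 would accept any
             * namespace type, CLONE_NEWNET insists on a net namespace. */
            if (setns(fd, CLONE_NEWNET) < 0) {
                    perror("setns");
                    close(fd);
                    return 1;
            }

            printf("joined namespace %s\n", path);
            close(fd);
            return 0;
    }
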
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index bfc9d071db9b..aa677e2a3823 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1014,6 +1014,7 @@ static struct platform_device *__initdata at32_usarts[4];
1014void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags) 1014void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags)
1015{ 1015{
1016 struct platform_device *pdev; 1016 struct platform_device *pdev;
1017 struct atmel_uart_data *pdata;
1017 1018
1018 switch (hw_id) { 1019 switch (hw_id) {
1019 case 0: 1020 case 0:
@@ -1042,7 +1043,8 @@ void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags)
1042 data->regs = (void __iomem *)pdev->resource[0].start; 1043 data->regs = (void __iomem *)pdev->resource[0].start;
1043 } 1044 }
1044 1045
1045 pdev->id = line; 1046 pdata = pdev->dev.platform_data;
 1047 	pdata->num = line;
1046 at32_usarts[line] = pdev; 1048 at32_usarts[line] = pdev;
1047} 1049}
1048 1050
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index 61740201b311..679458d9a622 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -33,6 +33,7 @@ extern struct platform_device *atmel_default_console_device;
33#define ATMEL_USART_CLK 0x04 33#define ATMEL_USART_CLK 0x04
34 34
35struct atmel_uart_data { 35struct atmel_uart_data {
36 int num; /* port num */
36 short use_dma_tx; /* use transmit DMA? */ 37 short use_dma_tx; /* use transmit DMA? */
37 short use_dma_rx; /* use receive DMA? */ 38 short use_dma_rx; /* use receive DMA? */
38 void __iomem *regs; /* virtual base address, if any */ 39 void __iomem *regs; /* virtual base address, if any */
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index a18180f2d007..d619b17c4413 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -47,9 +47,6 @@ config GENERIC_BUG
47config ZONE_DMA 47config ZONE_DMA
48 def_bool y 48 def_bool y
49 49
50config GENERIC_FIND_NEXT_BIT
51 def_bool y
52
53config GENERIC_GPIO 50config GENERIC_GPIO
54 def_bool y 51 def_bool y
55 52
diff --git a/arch/blackfin/include/asm/kgdb.h b/arch/blackfin/include/asm/kgdb.h
index 3ac0c72e9fee..aaf884591b07 100644
--- a/arch/blackfin/include/asm/kgdb.h
+++ b/arch/blackfin/include/asm/kgdb.h
@@ -108,6 +108,7 @@ static inline void arch_kgdb_breakpoint(void)
108#else 108#else
109# define CACHE_FLUSH_IS_SAFE 1 109# define CACHE_FLUSH_IS_SAFE 1
110#endif 110#endif
111#define GDB_ADJUSTS_BREAK_OFFSET
111#define HW_INST_WATCHPOINT_NUM 6 112#define HW_INST_WATCHPOINT_NUM 6
112#define HW_WATCHPOINT_NUM 8 113#define HW_WATCHPOINT_NUM 8
113#define TYPE_INST_WATCHPOINT 0 114#define TYPE_INST_WATCHPOINT 0
diff --git a/arch/blackfin/include/asm/ptrace.h b/arch/blackfin/include/asm/ptrace.h
index 1066d63e62b5..7854d4367c15 100644
--- a/arch/blackfin/include/asm/ptrace.h
+++ b/arch/blackfin/include/asm/ptrace.h
@@ -102,9 +102,6 @@ struct pt_regs {
102/* user_mode returns true if only one bit is set in IPEND, other than the 102/* user_mode returns true if only one bit is set in IPEND, other than the
103 master interrupt enable. */ 103 master interrupt enable. */
104#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1))) 104#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1)))
105#define instruction_pointer(regs) ((regs)->pc)
106#define user_stack_pointer(regs) ((regs)->usp)
107#define profile_pc(regs) instruction_pointer(regs)
108extern void show_regs(struct pt_regs *); 105extern void show_regs(struct pt_regs *);
109 106
110#define arch_has_single_step() (1) 107#define arch_has_single_step() (1)
@@ -128,6 +125,8 @@ extern int is_user_addr_valid(struct task_struct *child,
128 ((unsigned long)task_stack_page(task) + \ 125 ((unsigned long)task_stack_page(task) + \
129 (THREAD_SIZE - sizeof(struct pt_regs))) 126 (THREAD_SIZE - sizeof(struct pt_regs)))
130 127
128#include <asm-generic/ptrace.h>
129
131#endif /* __KERNEL__ */ 130#endif /* __KERNEL__ */
132 131
133#endif /* __ASSEMBLY__ */ 132#endif /* __ASSEMBLY__ */
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index ff9a9f35d50b..6ff9c411b145 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -397,8 +397,9 @@
397#define __NR_open_by_handle_at 376 397#define __NR_open_by_handle_at 376
398#define __NR_clock_adjtime 377 398#define __NR_clock_adjtime 377
399#define __NR_syncfs 378 399#define __NR_syncfs 378
400#define __NR_setns 379
400 401
401#define __NR_syscall 379 402#define __NR_syscall 380
402#define NR_syscalls __NR_syscall 403#define NR_syscalls __NR_syscall
403 404
404/* Old optional stuff no one actually uses */ 405/* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index e16dc4560048..76db1d483173 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -382,7 +382,6 @@ static struct platform_device net2272_bfin_device = {
382#endif 382#endif
383 383
384#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) 384#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
385#ifdef CONFIG_MTD_PARTITIONS
386const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; 385const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
387 386
388static struct mtd_partition bfin_plat_nand_partitions[] = { 387static struct mtd_partition bfin_plat_nand_partitions[] = {
@@ -396,7 +395,6 @@ static struct mtd_partition bfin_plat_nand_partitions[] = {
396 .offset = MTDPART_OFS_APPEND, 395 .offset = MTDPART_OFS_APPEND,
397 }, 396 },
398}; 397};
399#endif
400 398
401#define BFIN_NAND_PLAT_CLE 2 399#define BFIN_NAND_PLAT_CLE 2
402#define BFIN_NAND_PLAT_ALE 1 400#define BFIN_NAND_PLAT_ALE 1
@@ -423,11 +421,9 @@ static struct platform_nand_data bfin_plat_nand_data = {
423 .chip = { 421 .chip = {
424 .nr_chips = 1, 422 .nr_chips = 1,
425 .chip_delay = 30, 423 .chip_delay = 30,
426#ifdef CONFIG_MTD_PARTITIONS
427 .part_probe_types = part_probes, 424 .part_probe_types = part_probes,
428 .partitions = bfin_plat_nand_partitions, 425 .partitions = bfin_plat_nand_partitions,
429 .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions), 426 .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
430#endif
431 }, 427 },
432 .ctrl = { 428 .ctrl = {
433 .cmd_ctrl = bfin_plat_nand_cmd_ctrl, 429 .cmd_ctrl = bfin_plat_nand_cmd_ctrl,
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 3926cd909b66..9231a942892b 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -243,7 +243,6 @@ static struct platform_device bfin_uart0_device = {
243 243
244#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) 244#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
245 245
246#ifdef CONFIG_MTD_PARTITIONS
247const char *part_probes[] = { "cmdlinepart", NULL }; 246const char *part_probes[] = { "cmdlinepart", NULL };
248 247
249static struct mtd_partition bfin_plat_nand_partitions[] = { 248static struct mtd_partition bfin_plat_nand_partitions[] = {
@@ -257,7 +256,6 @@ static struct mtd_partition bfin_plat_nand_partitions[] = {
257 .offset = MTDPART_OFS_APPEND, 256 .offset = MTDPART_OFS_APPEND,
258 }, 257 },
259}; 258};
260#endif
261 259
262#define BFIN_NAND_PLAT_CLE 2 260#define BFIN_NAND_PLAT_CLE 2
263#define BFIN_NAND_PLAT_ALE 3 261#define BFIN_NAND_PLAT_ALE 3
@@ -286,11 +284,9 @@ static struct platform_nand_data bfin_plat_nand_data = {
286 .chip = { 284 .chip = {
287 .nr_chips = 1, 285 .nr_chips = 1,
288 .chip_delay = 30, 286 .chip_delay = 30,
289#ifdef CONFIG_MTD_PARTITIONS
290 .part_probe_types = part_probes, 287 .part_probe_types = part_probes,
291 .partitions = bfin_plat_nand_partitions, 288 .partitions = bfin_plat_nand_partitions,
292 .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions), 289 .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
293#endif
294 }, 290 },
295 .ctrl = { 291 .ctrl = {
296 .cmd_ctrl = bfin_plat_nand_cmd_ctrl, 292 .cmd_ctrl = bfin_plat_nand_cmd_ctrl,
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index f96933f48a7f..dda11ef06be5 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1753,6 +1753,7 @@ ENTRY(_sys_call_table)
1753 .long _sys_open_by_handle_at 1753 .long _sys_open_by_handle_at
1754 .long _sys_clock_adjtime 1754 .long _sys_clock_adjtime
1755 .long _sys_syncfs 1755 .long _sys_syncfs
1756 .long _sys_setns
1756 1757
1757 .rept NR_syscalls-(.-_sys_call_table)/4 1758 .rept NR_syscalls-(.-_sys_call_table)/4
1758 .long _sys_ni_syscall 1759 .long _sys_ni_syscall
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index a6d03069d0ff..17addacb169e 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -31,10 +31,6 @@ config ARCH_HAS_ILOG2_U64
31 bool 31 bool
32 default n 32 default n
33 33
34config GENERIC_FIND_NEXT_BIT
35 bool
36 default y
37
38config GENERIC_HWEIGHT 34config GENERIC_HWEIGHT
39 bool 35 bool
40 default y 36 default y
@@ -274,7 +270,6 @@ config ETRAX_AXISFLASHMAP
274 select MTD_JEDECPROBE if ETRAX_ARCH_V32 270 select MTD_JEDECPROBE if ETRAX_ARCH_V32
275 select MTD_CHAR 271 select MTD_CHAR
276 select MTD_BLOCK 272 select MTD_BLOCK
277 select MTD_PARTITIONS
278 select MTD_COMPLEX_MAPPINGS 273 select MTD_COMPLEX_MAPPINGS
279 help 274 help
280 This option enables MTD mapping of flash devices. Needed to use 275 This option enables MTD mapping of flash devices. Needed to use
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index ed708e19d09e..a4bbdfd37bd8 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -372,7 +372,7 @@ static int __init init_axis_flash(void)
372#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE 372#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
373 if (mymtd) { 373 if (mymtd) {
374 main_partition.size = mymtd->size; 374 main_partition.size = mymtd->size;
375 err = add_mtd_partitions(mymtd, &main_partition, 1); 375 err = mtd_device_register(mymtd, &main_partition, 1);
376 if (err) 376 if (err)
377 panic("axisflashmap: Could not initialize " 377 panic("axisflashmap: Could not initialize "
378 "partition for whole main mtd device!\n"); 378 "partition for whole main mtd device!\n");
@@ -382,10 +382,12 @@ static int __init init_axis_flash(void)
382 if (mymtd) { 382 if (mymtd) {
383 if (use_default_ptable) { 383 if (use_default_ptable) {
384 printk(KERN_INFO " Using default partition table.\n"); 384 printk(KERN_INFO " Using default partition table.\n");
385 err = add_mtd_partitions(mymtd, axis_default_partitions, 385 err = mtd_device_register(mymtd,
386 NUM_DEFAULT_PARTITIONS); 386 axis_default_partitions,
387 NUM_DEFAULT_PARTITIONS);
387 } else { 388 } else {
388 err = add_mtd_partitions(mymtd, axis_partitions, pidx); 389 err = mtd_device_register(mymtd, axis_partitions,
390 pidx);
389 } 391 }
390 392
391 if (err) 393 if (err)
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 0d6420d087fd..1161883eb582 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -937,6 +937,7 @@ sys_call_table:
937 .long sys_inotify_init1 937 .long sys_inotify_init1
938 .long sys_preadv 938 .long sys_preadv
939 .long sys_pwritev 939 .long sys_pwritev
940 .long sys_setns /* 335 */
940 941
941 /* 942 /*
942 * NOTE!! This doesn't have to be exact - we just have 943 * NOTE!! This doesn't have to be exact - we just have
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 1633b120aa81..41a2732e8b9c 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -405,7 +405,6 @@ config ETRAX_AXISFLASHMAP
405 select MTD_JEDECPROBE 405 select MTD_JEDECPROBE
406 select MTD_CHAR 406 select MTD_CHAR
407 select MTD_BLOCK 407 select MTD_BLOCK
408 select MTD_PARTITIONS
409 select MTD_COMPLEX_MAPPINGS 408 select MTD_COMPLEX_MAPPINGS
410 help 409 help
411 This option enables MTD mapping of flash devices. Needed to use 410 This option enables MTD mapping of flash devices. Needed to use
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 7b155f8203b8..a2bde3744622 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -561,7 +561,7 @@ static int __init init_axis_flash(void)
561#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE 561#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
562 if (main_mtd) { 562 if (main_mtd) {
563 main_partition.size = main_mtd->size; 563 main_partition.size = main_mtd->size;
564 err = add_mtd_partitions(main_mtd, &main_partition, 1); 564 err = mtd_device_register(main_mtd, &main_partition, 1);
565 if (err) 565 if (err)
566 panic("axisflashmap: Could not initialize " 566 panic("axisflashmap: Could not initialize "
567 "partition for whole main mtd device!\n"); 567 "partition for whole main mtd device!\n");
@@ -597,7 +597,8 @@ static int __init init_axis_flash(void)
597 mtd_ram->erasesize = (main_mtd ? main_mtd->erasesize : 597 mtd_ram->erasesize = (main_mtd ? main_mtd->erasesize :
598 CONFIG_ETRAX_PTABLE_SECTOR); 598 CONFIG_ETRAX_PTABLE_SECTOR);
599 } else { 599 } else {
600 err = add_mtd_partitions(main_mtd, &partition[part], 1); 600 err = mtd_device_register(main_mtd, &partition[part],
601 1);
601 if (err) 602 if (err)
602 panic("axisflashmap: Could not add mtd " 603 panic("axisflashmap: Could not add mtd "
603 "partition %d\n", part); 604 "partition %d\n", part);
@@ -633,7 +634,7 @@ static int __init init_axis_flash(void)
633#ifndef CONFIG_ETRAX_VCS_SIM 634#ifndef CONFIG_ETRAX_VCS_SIM
634 if (aux_mtd) { 635 if (aux_mtd) {
635 aux_partition.size = aux_mtd->size; 636 aux_partition.size = aux_mtd->size;
636 err = add_mtd_partitions(aux_mtd, &aux_partition, 1); 637 err = mtd_device_register(aux_mtd, &aux_partition, 1);
637 if (err) 638 if (err)
638 panic("axisflashmap: Could not initialize " 639 panic("axisflashmap: Could not initialize "
639 "aux mtd device!\n"); 640 "aux mtd device!\n");
diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S
index 3abf12c23e5f..84fed7e91ada 100644
--- a/arch/cris/arch-v32/kernel/entry.S
+++ b/arch/cris/arch-v32/kernel/entry.S
@@ -880,6 +880,7 @@ sys_call_table:
880 .long sys_inotify_init1 880 .long sys_inotify_init1
881 .long sys_preadv 881 .long sys_preadv
882 .long sys_pwritev 882 .long sys_pwritev
883 .long sys_setns /* 335 */
883 884
884 /* 885 /*
885 * NOTE!! This doesn't have to be exact - we just have 886 * NOTE!! This doesn't have to be exact - we just have
diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h
index f6fad83b3a8c..f921b8b0f97e 100644
--- a/arch/cris/include/asm/unistd.h
+++ b/arch/cris/include/asm/unistd.h
@@ -339,10 +339,11 @@
339#define __NR_inotify_init1 332 339#define __NR_inotify_init1 332
340#define __NR_preadv 333 340#define __NR_preadv 333
341#define __NR_pwritev 334 341#define __NR_pwritev 334
342#define __NR_setns 335
342 343
343#ifdef __KERNEL__ 344#ifdef __KERNEL__
344 345
345#define NR_syscalls 335 346#define NR_syscalls 336
346 347
347#include <arch/unistd.h> 348#include <arch/unistd.h>
348 349
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 064f62196745..cb884e489425 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -19,14 +19,6 @@ config RWSEM_GENERIC_SPINLOCK
19config RWSEM_XCHGADD_ALGORITHM 19config RWSEM_XCHGADD_ALGORITHM
20 bool 20 bool
21 21
22config GENERIC_FIND_NEXT_BIT
23 bool
24 default y
25
26config GENERIC_FIND_BIT_LE
27 bool
28 default y
29
30config GENERIC_HWEIGHT 22config GENERIC_HWEIGHT
31 bool 23 bool
32 default y 24 default y
diff --git a/arch/frv/include/asm/suspend.h b/arch/frv/include/asm/suspend.h
deleted file mode 100644
index 5fa7b5a6ee40..000000000000
--- a/arch/frv/include/asm/suspend.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/* suspend.h: suspension stuff
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _ASM_SUSPEND_H
13#define _ASM_SUSPEND_H
14
15static inline int arch_prepare_suspend(void)
16{
17 return 0;
18}
19
20#endif /* _ASM_SUSPEND_H */
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index b28da499e22a..a569dff7cd59 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -343,10 +343,11 @@
343#define __NR_pwritev 334 343#define __NR_pwritev 334
344#define __NR_rt_tgsigqueueinfo 335 344#define __NR_rt_tgsigqueueinfo 335
345#define __NR_perf_event_open 336 345#define __NR_perf_event_open 336
346#define __NR_setns 337
346 347
347#ifdef __KERNEL__ 348#ifdef __KERNEL__
348 349
349#define NR_syscalls 337 350#define NR_syscalls 338
350 351
351#define __ARCH_WANT_IPC_PARSE_VERSION 352#define __ARCH_WANT_IPC_PARSE_VERSION
352/* #define __ARCH_WANT_OLD_READDIR */ 353/* #define __ARCH_WANT_OLD_READDIR */
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 63d579bf1c29..017d6d7b784f 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1526,5 +1526,6 @@ sys_call_table:
1526 .long sys_pwritev 1526 .long sys_pwritev
1527 .long sys_rt_tgsigqueueinfo /* 335 */ 1527 .long sys_rt_tgsigqueueinfo /* 335 */
1528 .long sys_perf_event_open 1528 .long sys_perf_event_open
1529 .long sys_setns
1529 1530
1530syscall_table_size = (. - sys_call_table) 1531syscall_table_size = (. - sys_call_table)
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index e20322ffcaf8..091ed6192ae8 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -41,14 +41,6 @@ config ARCH_HAS_ILOG2_U64
41 bool 41 bool
42 default n 42 default n
43 43
44config GENERIC_FIND_NEXT_BIT
45 bool
46 default y
47
48config GENERIC_FIND_BIT_LE
49 bool
50 default y
51
52config GENERIC_HWEIGHT 44config GENERIC_HWEIGHT
53 bool 45 bool
54 default y 46 default y
diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h
index 50f2c5a36591..2c3f8e60b1e0 100644
--- a/arch/h8300/include/asm/unistd.h
+++ b/arch/h8300/include/asm/unistd.h
@@ -325,10 +325,11 @@
325#define __NR_move_pages 317 325#define __NR_move_pages 317
326#define __NR_getcpu 318 326#define __NR_getcpu 318
327#define __NR_epoll_pwait 319 327#define __NR_epoll_pwait 319
328#define __NR_setns 320
328 329
329#ifdef __KERNEL__ 330#ifdef __KERNEL__
330 331
331#define NR_syscalls 320 332#define NR_syscalls 321
332 333
333#define __ARCH_WANT_IPC_PARSE_VERSION 334#define __ARCH_WANT_IPC_PARSE_VERSION
334#define __ARCH_WANT_OLD_READDIR 335#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
index faefaff7d43d..f4b2e67bcc34 100644
--- a/arch/h8300/kernel/syscalls.S
+++ b/arch/h8300/kernel/syscalls.S
@@ -333,6 +333,7 @@ SYMBOL_NAME_LABEL(sys_call_table)
333 .long SYMBOL_NAME(sys_ni_syscall) /* sys_move_pages */ 333 .long SYMBOL_NAME(sys_ni_syscall) /* sys_move_pages */
334 .long SYMBOL_NAME(sys_getcpu) 334 .long SYMBOL_NAME(sys_getcpu)
335 .long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_pwait */ 335 .long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_pwait */
336 .long SYMBOL_NAME(sys_setns) /* 320 */
336 337
337 .macro call_sp addr 338 .macro call_sp addr
338 mov.l #SYMBOL_NAME(\addr),er6 339 mov.l #SYMBOL_NAME(\addr),er6
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e5cc56ae6ce3..38280ef4a2af 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -78,10 +78,6 @@ config HUGETLB_PAGE_SIZE_VARIABLE
78 depends on HUGETLB_PAGE 78 depends on HUGETLB_PAGE
79 default y 79 default y
80 80
81config GENERIC_FIND_NEXT_BIT
82 bool
83 default y
84
85config GENERIC_CALIBRATE_DELAY 81config GENERIC_CALIBRATE_DELAY
86 bool 82 bool
87 default y 83 default y
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 404d037c5e10..1cf0f496f744 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -319,11 +319,12 @@
319#define __NR_open_by_handle_at 1327 319#define __NR_open_by_handle_at 1327
320#define __NR_clock_adjtime 1328 320#define __NR_clock_adjtime 1328
321#define __NR_syncfs 1329 321#define __NR_syncfs 1329
322#define __NR_setns 1330
322 323
323#ifdef __KERNEL__ 324#ifdef __KERNEL__
324 325
325 326
326#define NR_syscalls 306 /* length of syscall table */ 327#define NR_syscalls 307 /* length of syscall table */
327 328
328/* 329/*
329 * The following defines stop scripts/checksyscalls.sh from complaining about 330 * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 6de2e23b3636..9ca80193cd4e 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1775,6 +1775,7 @@ sys_call_table:
1775 data8 sys_open_by_handle_at 1775 data8 sys_open_by_handle_at
1776 data8 sys_clock_adjtime 1776 data8 sys_clock_adjtime
1777 data8 sys_syncfs 1777 data8 sys_syncfs
1778 data8 sys_setns // 1330
1778 1779
1779 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1780 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
1780#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ 1781#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 736b808d2291..85b44e858225 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -256,14 +256,6 @@ config ARCH_HAS_ILOG2_U64
256 bool 256 bool
257 default n 257 default n
258 258
259config GENERIC_FIND_NEXT_BIT
260 bool
261 default y
262
263config GENERIC_FIND_BIT_LE
264 bool
265 default y
266
267config GENERIC_HWEIGHT 259config GENERIC_HWEIGHT
268 bool 260 bool
269 default y 261 default y
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index 8accc1bb0263..cf7829a61551 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -81,11 +81,11 @@ static __inline__ int cpu_number_map(int cpu)
81 81
82static __inline__ unsigned int num_booting_cpus(void) 82static __inline__ unsigned int num_booting_cpus(void)
83{ 83{
84 return cpus_weight(cpu_callout_map); 84 return cpumask_weight(&cpu_callout_map);
85} 85}
86 86
87extern void smp_send_timer(void); 87extern void smp_send_timer(void);
88extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); 88extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
89 89
90extern void arch_send_call_function_single_ipi(int cpu); 90extern void arch_send_call_function_single_ipi(int cpu);
91extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 91extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index c70545689da8..3e1db561aacc 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -330,10 +330,11 @@
330/* #define __NR_timerfd 322 removed */ 330/* #define __NR_timerfd 322 removed */
331#define __NR_eventfd 323 331#define __NR_eventfd 323
332#define __NR_fallocate 324 332#define __NR_fallocate 324
333#define __NR_setns 325
333 334
334#ifdef __KERNEL__ 335#ifdef __KERNEL__
335 336
336#define NR_syscalls 325 337#define NR_syscalls 326
337 338
338#define __ARCH_WANT_IPC_PARSE_VERSION 339#define __ARCH_WANT_IPC_PARSE_VERSION
339#define __ARCH_WANT_STAT64 340#define __ARCH_WANT_STAT64
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index fc10b39893d4..092d40a6708e 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -30,6 +30,7 @@
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/mmu_context.h> 31#include <asm/mmu_context.h>
32#include <asm/m32r.h> 32#include <asm/m32r.h>
33#include <asm/tlbflush.h>
33 34
34/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 35/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
35/* Data structures and variables */ 36/* Data structures and variables */
@@ -61,33 +62,22 @@ extern spinlock_t ipi_lock[];
61/* Function Prototypes */ 62/* Function Prototypes */
62/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 63/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
63 64
64void smp_send_reschedule(int);
65void smp_reschedule_interrupt(void); 65void smp_reschedule_interrupt(void);
66
67void smp_flush_cache_all(void);
68void smp_flush_cache_all_interrupt(void); 66void smp_flush_cache_all_interrupt(void);
69 67
70void smp_flush_tlb_all(void);
71static void flush_tlb_all_ipi(void *); 68static void flush_tlb_all_ipi(void *);
72
73void smp_flush_tlb_mm(struct mm_struct *);
74void smp_flush_tlb_range(struct vm_area_struct *, unsigned long, \
75 unsigned long);
76void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
77static void flush_tlb_others(cpumask_t, struct mm_struct *, 69static void flush_tlb_others(cpumask_t, struct mm_struct *,
78 struct vm_area_struct *, unsigned long); 70 struct vm_area_struct *, unsigned long);
71
79void smp_invalidate_interrupt(void); 72void smp_invalidate_interrupt(void);
80 73
81void smp_send_stop(void);
82static void stop_this_cpu(void *); 74static void stop_this_cpu(void *);
83 75
84void smp_send_timer(void);
85void smp_ipi_timer_interrupt(struct pt_regs *); 76void smp_ipi_timer_interrupt(struct pt_regs *);
86void smp_local_timer_interrupt(void); 77void smp_local_timer_interrupt(void);
87 78
88static void send_IPI_allbutself(int, int); 79static void send_IPI_allbutself(int, int);
89static void send_IPI_mask(const struct cpumask *, int, int); 80static void send_IPI_mask(const struct cpumask *, int, int);
90unsigned long send_IPI_mask_phys(cpumask_t, int, int);
91 81
92/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 82/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
93/* Rescheduling request Routines */ 83/* Rescheduling request Routines */
@@ -162,10 +152,10 @@ void smp_flush_cache_all(void)
162 unsigned long *mask; 152 unsigned long *mask;
163 153
164 preempt_disable(); 154 preempt_disable();
165 cpumask = cpu_online_map; 155 cpumask_copy(&cpumask, cpu_online_mask);
166 cpu_clear(smp_processor_id(), cpumask); 156 cpumask_clear_cpu(smp_processor_id(), &cpumask);
167 spin_lock(&flushcache_lock); 157 spin_lock(&flushcache_lock);
168 mask=cpus_addr(cpumask); 158 mask=cpumask_bits(&cpumask);
169 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); 159 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
170 send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); 160 send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
171 _flush_cache_copyback_all(); 161 _flush_cache_copyback_all();
@@ -263,8 +253,8 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
263 preempt_disable(); 253 preempt_disable();
264 cpu_id = smp_processor_id(); 254 cpu_id = smp_processor_id();
265 mmc = &mm->context[cpu_id]; 255 mmc = &mm->context[cpu_id];
266 cpu_mask = *mm_cpumask(mm); 256 cpumask_copy(&cpu_mask, mm_cpumask(mm));
267 cpu_clear(cpu_id, cpu_mask); 257 cpumask_clear_cpu(cpu_id, &cpu_mask);
268 258
269 if (*mmc != NO_CONTEXT) { 259 if (*mmc != NO_CONTEXT) {
270 local_irq_save(flags); 260 local_irq_save(flags);
@@ -275,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
275 cpumask_clear_cpu(cpu_id, mm_cpumask(mm)); 265 cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
276 local_irq_restore(flags); 266 local_irq_restore(flags);
277 } 267 }
278 if (!cpus_empty(cpu_mask)) 268 if (!cpumask_empty(&cpu_mask))
279 flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); 269 flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
280 270
281 preempt_enable(); 271 preempt_enable();
@@ -333,8 +323,8 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
333 preempt_disable(); 323 preempt_disable();
334 cpu_id = smp_processor_id(); 324 cpu_id = smp_processor_id();
335 mmc = &mm->context[cpu_id]; 325 mmc = &mm->context[cpu_id];
336 cpu_mask = *mm_cpumask(mm); 326 cpumask_copy(&cpu_mask, mm_cpumask(mm));
337 cpu_clear(cpu_id, cpu_mask); 327 cpumask_clear_cpu(cpu_id, &cpu_mask);
338 328
339#ifdef DEBUG_SMP 329#ifdef DEBUG_SMP
340 if (!mm) 330 if (!mm)
@@ -348,7 +338,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
348 __flush_tlb_page(va); 338 __flush_tlb_page(va);
349 local_irq_restore(flags); 339 local_irq_restore(flags);
350 } 340 }
351 if (!cpus_empty(cpu_mask)) 341 if (!cpumask_empty(&cpu_mask))
352 flush_tlb_others(cpu_mask, mm, vma, va); 342 flush_tlb_others(cpu_mask, mm, vma, va);
353 343
354 preempt_enable(); 344 preempt_enable();
@@ -395,14 +385,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
395 * - current CPU must not be in mask 385 * - current CPU must not be in mask
396 * - mask must exist :) 386 * - mask must exist :)
397 */ 387 */
398 BUG_ON(cpus_empty(cpumask)); 388 BUG_ON(cpumask_empty(&cpumask));
399 389
400 BUG_ON(cpu_isset(smp_processor_id(), cpumask)); 390 BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
401 BUG_ON(!mm); 391 BUG_ON(!mm);
402 392
403 /* If a CPU which we ran on has gone down, OK. */ 393 /* If a CPU which we ran on has gone down, OK. */
404 cpus_and(cpumask, cpumask, cpu_online_map); 394 cpumask_and(&cpumask, &cpumask, cpu_online_mask);
405 if (cpus_empty(cpumask)) 395 if (cpumask_empty(&cpumask))
406 return; 396 return;
407 397
408 /* 398 /*
@@ -416,7 +406,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
416 flush_mm = mm; 406 flush_mm = mm;
417 flush_vma = vma; 407 flush_vma = vma;
418 flush_va = va; 408 flush_va = va;
419 mask=cpus_addr(cpumask); 409 mask=cpumask_bits(&cpumask);
420 atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); 410 atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
421 411
422 /* 412 /*
@@ -425,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
425 */ 415 */
426 send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); 416 send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
427 417
428 while (!cpus_empty(flush_cpumask)) { 418 while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
429 /* nothing. lockup detection does not belong here */ 419 /* nothing. lockup detection does not belong here */
430 mb(); 420 mb();
431 } 421 }
@@ -460,7 +450,7 @@ void smp_invalidate_interrupt(void)
460 int cpu_id = smp_processor_id(); 450 int cpu_id = smp_processor_id();
461 unsigned long *mmc = &flush_mm->context[cpu_id]; 451 unsigned long *mmc = &flush_mm->context[cpu_id];
462 452
463 if (!cpu_isset(cpu_id, flush_cpumask)) 453 if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
464 return; 454 return;
465 455
466 if (flush_va == FLUSH_ALL) { 456 if (flush_va == FLUSH_ALL) {
@@ -478,7 +468,7 @@ void smp_invalidate_interrupt(void)
478 __flush_tlb_page(va); 468 __flush_tlb_page(va);
479 } 469 }
480 } 470 }
481 cpu_clear(cpu_id, flush_cpumask); 471 cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
482} 472}
483 473
484/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 474/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -530,7 +520,7 @@ static void stop_this_cpu(void *dummy)
530 /* 520 /*
531 * Remove this CPU: 521 * Remove this CPU:
532 */ 522 */
533 cpu_clear(cpu_id, cpu_online_map); 523 set_cpu_online(cpu_id, false);
534 524
535 /* 525 /*
536 * PSW IE = 1; 526 * PSW IE = 1;
@@ -725,8 +715,8 @@ static void send_IPI_allbutself(int ipi_num, int try)
725{ 715{
726 cpumask_t cpumask; 716 cpumask_t cpumask;
727 717
728 cpumask = cpu_online_map; 718 cpumask_copy(&cpumask, cpu_online_mask);
729 cpu_clear(smp_processor_id(), cpumask); 719 cpumask_clear_cpu(smp_processor_id(), &cpumask);
730 720
731 send_IPI_mask(&cpumask, ipi_num, try); 721 send_IPI_mask(&cpumask, ipi_num, try);
732} 722}
@@ -763,13 +753,13 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
763 cpumask_and(&tmp, cpumask, cpu_online_mask); 753 cpumask_and(&tmp, cpumask, cpu_online_mask);
764 BUG_ON(!cpumask_equal(cpumask, &tmp)); 754 BUG_ON(!cpumask_equal(cpumask, &tmp));
765 755
766 physid_mask = CPU_MASK_NONE; 756 cpumask_clear(&physid_mask);
767 for_each_cpu(cpu_id, cpumask) { 757 for_each_cpu(cpu_id, cpumask) {
768 if ((phys_id = cpu_to_physid(cpu_id)) != -1) 758 if ((phys_id = cpu_to_physid(cpu_id)) != -1)
769 cpu_set(phys_id, physid_mask); 759 cpumask_set_cpu(phys_id, &physid_mask);
770 } 760 }
771 761
772 send_IPI_mask_phys(physid_mask, ipi_num, try); 762 send_IPI_mask_phys(&physid_mask, ipi_num, try);
773} 763}
774 764
775/*==========================================================================* 765/*==========================================================================*
@@ -792,14 +782,14 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
792 * ---------- --- -------------------------------------------------------- 782 * ---------- --- --------------------------------------------------------
793 * 783 *
794 *==========================================================================*/ 784 *==========================================================================*/
795unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num, 785unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
796 int try) 786 int try)
797{ 787{
798 spinlock_t *ipilock; 788 spinlock_t *ipilock;
799 volatile unsigned long *ipicr_addr; 789 volatile unsigned long *ipicr_addr;
800 unsigned long ipicr_val; 790 unsigned long ipicr_val;
801 unsigned long my_physid_mask; 791 unsigned long my_physid_mask;
802 unsigned long mask = cpus_addr(physid_mask)[0]; 792 unsigned long mask = cpumask_bits(physid_mask)[0];
803 793
804 794
805 if (mask & ~physids_coerce(phys_cpu_present_map)) 795 if (mask & ~physids_coerce(phys_cpu_present_map))
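Note on the m32r hunks above: they are part of the tree-wide move from the old cpus_*/cpu_* macros, which take a cpumask_t by value, to the cpumask_* helpers, which take a struct cpumask pointer. A minimal sketch of the before/after idiom, with a hypothetical local mask and assuming <linux/cpumask.h>, <linux/printk.h> and <linux/smp.h>:

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Illustrative only: shows the pointer-based calls used in the hunks. */
static void example_mask_usage(void)
{
	cpumask_t mask;					/* hypothetical local mask */

	cpumask_copy(&mask, cpu_online_mask);		/* was: mask = cpu_online_map */
	cpumask_clear_cpu(smp_processor_id(), &mask);	/* was: cpu_clear(smp_processor_id(), mask) */
	if (!cpumask_empty(&mask))			/* was: !cpus_empty(mask) */
		pr_info("%u other CPUs online\n", cpumask_weight(&mask));
}
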
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index e034844cfc0d..cfdbe5d15002 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -135,9 +135,9 @@ void __devinit smp_prepare_boot_cpu(void)
135{ 135{
136 bsp_phys_id = hard_smp_processor_id(); 136 bsp_phys_id = hard_smp_processor_id();
137 physid_set(bsp_phys_id, phys_cpu_present_map); 137 physid_set(bsp_phys_id, phys_cpu_present_map);
138 cpu_set(0, cpu_online_map); /* BSP's cpu_id == 0 */ 138 set_cpu_online(0, true); /* BSP's cpu_id == 0 */
139 cpu_set(0, cpu_callout_map); 139 cpumask_set_cpu(0, &cpu_callout_map);
140 cpu_set(0, cpu_callin_map); 140 cpumask_set_cpu(0, &cpu_callin_map);
141 141
142 /* 142 /*
143 * Initialize the logical to physical CPU number mapping 143 * Initialize the logical to physical CPU number mapping
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
178 for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++) 178 for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
179 physid_set(phys_id, phys_cpu_present_map); 179 physid_set(phys_id, phys_cpu_present_map);
180#ifndef CONFIG_HOTPLUG_CPU 180#ifndef CONFIG_HOTPLUG_CPU
181 init_cpu_present(&cpu_possible_map); 181 init_cpu_present(cpu_possible_mask);
182#endif 182#endif
183 183
184 show_mp_info(nr_cpu); 184 show_mp_info(nr_cpu);
@@ -294,10 +294,10 @@ static void __init do_boot_cpu(int phys_id)
294 send_status = 0; 294 send_status = 0;
295 boot_status = 0; 295 boot_status = 0;
296 296
297 cpu_set(phys_id, cpu_bootout_map); 297 cpumask_set_cpu(phys_id, &cpu_bootout_map);
298 298
299 /* Send Startup IPI */ 299 /* Send Startup IPI */
300 send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0); 300 send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);
301 301
302 Dprintk("Waiting for send to finish...\n"); 302 Dprintk("Waiting for send to finish...\n");
303 timeout = 0; 303 timeout = 0;
@@ -306,7 +306,7 @@ static void __init do_boot_cpu(int phys_id)
306 do { 306 do {
307 Dprintk("+"); 307 Dprintk("+");
308 udelay(1000); 308 udelay(1000);
309 send_status = !cpu_isset(phys_id, cpu_bootin_map); 309 send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
310 } while (send_status && (timeout++ < 100)); 310 } while (send_status && (timeout++ < 100));
311 311
312 Dprintk("After Startup.\n"); 312 Dprintk("After Startup.\n");
@@ -316,19 +316,19 @@ static void __init do_boot_cpu(int phys_id)
316 * allow APs to start initializing. 316 * allow APs to start initializing.
317 */ 317 */
318 Dprintk("Before Callout %d.\n", cpu_id); 318 Dprintk("Before Callout %d.\n", cpu_id);
319 cpu_set(cpu_id, cpu_callout_map); 319 cpumask_set_cpu(cpu_id, &cpu_callout_map);
320 Dprintk("After Callout %d.\n", cpu_id); 320 Dprintk("After Callout %d.\n", cpu_id);
321 321
322 /* 322 /*
323 * Wait 5s total for a response 323 * Wait 5s total for a response
324 */ 324 */
325 for (timeout = 0; timeout < 5000; timeout++) { 325 for (timeout = 0; timeout < 5000; timeout++) {
326 if (cpu_isset(cpu_id, cpu_callin_map)) 326 if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
327 break; /* It has booted */ 327 break; /* It has booted */
328 udelay(1000); 328 udelay(1000);
329 } 329 }
330 330
331 if (cpu_isset(cpu_id, cpu_callin_map)) { 331 if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
332 /* number CPUs logically, starting from 1 (BSP is 0) */ 332 /* number CPUs logically, starting from 1 (BSP is 0) */
333 Dprintk("OK.\n"); 333 Dprintk("OK.\n");
334 } else { 334 } else {
@@ -340,9 +340,9 @@ static void __init do_boot_cpu(int phys_id)
340 340
341 if (send_status || boot_status) { 341 if (send_status || boot_status) {
342 unmap_cpu_to_physid(cpu_id, phys_id); 342 unmap_cpu_to_physid(cpu_id, phys_id);
343 cpu_clear(cpu_id, cpu_callout_map); 343 cpumask_clear_cpu(cpu_id, &cpu_callout_map);
344 cpu_clear(cpu_id, cpu_callin_map); 344 cpumask_clear_cpu(cpu_id, &cpu_callin_map);
345 cpu_clear(cpu_id, cpu_initialized); 345 cpumask_clear_cpu(cpu_id, &cpu_initialized);
346 cpucount--; 346 cpucount--;
347 } 347 }
348} 348}
@@ -351,17 +351,17 @@ int __cpuinit __cpu_up(unsigned int cpu_id)
351{ 351{
352 int timeout; 352 int timeout;
353 353
354 cpu_set(cpu_id, smp_commenced_mask); 354 cpumask_set_cpu(cpu_id, &smp_commenced_mask);
355 355
356 /* 356 /*
357 * Wait 5s total for a response 357 * Wait 5s total for a response
358 */ 358 */
359 for (timeout = 0; timeout < 5000; timeout++) { 359 for (timeout = 0; timeout < 5000; timeout++) {
360 if (cpu_isset(cpu_id, cpu_online_map)) 360 if (cpu_online(cpu_id))
361 break; 361 break;
362 udelay(1000); 362 udelay(1000);
363 } 363 }
364 if (!cpu_isset(cpu_id, cpu_online_map)) 364 if (!cpu_online(cpu_id))
365 BUG(); 365 BUG();
366 366
367 return 0; 367 return 0;
@@ -373,11 +373,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
373 unsigned long bogosum = 0; 373 unsigned long bogosum = 0;
374 374
375 for (timeout = 0; timeout < 5000; timeout++) { 375 for (timeout = 0; timeout < 5000; timeout++) {
376 if (cpus_equal(cpu_callin_map, cpu_online_map)) 376 if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
377 break; 377 break;
378 udelay(1000); 378 udelay(1000);
379 } 379 }
380 if (!cpus_equal(cpu_callin_map, cpu_online_map)) 380 if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
381 BUG(); 381 BUG();
382 382
383 for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++) 383 for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
@@ -388,7 +388,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
388 */ 388 */
389 Dprintk("Before bogomips.\n"); 389 Dprintk("Before bogomips.\n");
390 if (cpucount) { 390 if (cpucount) {
391 for_each_cpu_mask(cpu_id, cpu_online_map) 391 for_each_cpu(cpu_id,cpu_online_mask)
392 bogosum += cpu_data[cpu_id].loops_per_jiffy; 392 bogosum += cpu_data[cpu_id].loops_per_jiffy;
393 393
394 printk(KERN_INFO "Total of %d processors activated " \ 394 printk(KERN_INFO "Total of %d processors activated " \
@@ -425,7 +425,7 @@ int __init start_secondary(void *unused)
425 cpu_init(); 425 cpu_init();
426 preempt_disable(); 426 preempt_disable();
427 smp_callin(); 427 smp_callin();
428 while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) 428 while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
429 cpu_relax(); 429 cpu_relax();
430 430
431 smp_online(); 431 smp_online();
@@ -463,7 +463,7 @@ static void __init smp_callin(void)
463 int cpu_id = smp_processor_id(); 463 int cpu_id = smp_processor_id();
464 unsigned long timeout; 464 unsigned long timeout;
465 465
466 if (cpu_isset(cpu_id, cpu_callin_map)) { 466 if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
467 printk("huh, phys CPU#%d, CPU#%d already present??\n", 467 printk("huh, phys CPU#%d, CPU#%d already present??\n",
468 phys_id, cpu_id); 468 phys_id, cpu_id);
469 BUG(); 469 BUG();
@@ -474,7 +474,7 @@ static void __init smp_callin(void)
474 timeout = jiffies + (2 * HZ); 474 timeout = jiffies + (2 * HZ);
475 while (time_before(jiffies, timeout)) { 475 while (time_before(jiffies, timeout)) {
476 /* Has the boot CPU finished it's STARTUP sequence ? */ 476 /* Has the boot CPU finished it's STARTUP sequence ? */
477 if (cpu_isset(cpu_id, cpu_callout_map)) 477 if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
478 break; 478 break;
479 cpu_relax(); 479 cpu_relax();
480 } 480 }
@@ -486,7 +486,7 @@ static void __init smp_callin(void)
486 } 486 }
487 487
488 /* Allow the master to continue. */ 488 /* Allow the master to continue. */
489 cpu_set(cpu_id, cpu_callin_map); 489 cpumask_set_cpu(cpu_id, &cpu_callin_map);
490} 490}
491 491
492static void __init smp_online(void) 492static void __init smp_online(void)
@@ -503,7 +503,7 @@ static void __init smp_online(void)
503 /* Save our processor parameters */ 503 /* Save our processor parameters */
504 smp_store_cpu_info(cpu_id); 504 smp_store_cpu_info(cpu_id);
505 505
506 cpu_set(cpu_id, cpu_online_map); 506 set_cpu_online(cpu_id, true);
507} 507}
508 508
509/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 509/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
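A related sub-pattern in the smpboot.c hunks: the global online map is no longer written directly; set_cpu_online() and cpu_online() are the accessors, and init_cpu_present() now takes the cpu_possible_mask pointer. A short sketch (the wrapper function is hypothetical, the calls are the ones used above):

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Illustrative only. */
static void example_mark_online(int cpu)
{
	set_cpu_online(cpu, true);	/* was: cpu_set(cpu, cpu_online_map) */
	WARN_ON(!cpu_online(cpu));	/* was: cpu_isset(cpu, cpu_online_map) */

	/* similarly: init_cpu_present(cpu_possible_mask);
	 *   was:     init_cpu_present(&cpu_possible_map); */
}
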
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index 60536e271233..528f2e6ad064 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -324,3 +324,4 @@ ENTRY(sys_call_table)
324 .long sys_ni_syscall 324 .long sys_ni_syscall
325 .long sys_eventfd 325 .long sys_eventfd
326 .long sys_fallocate 326 .long sys_fallocate
327 .long sys_setns /* 325 */
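Each architecture in this series wires setns the same way: append the table entry (above) and add the __NR_setns constant plus the NR_syscalls bump in its unistd.h. Once the uapi headers carry __NR_setns, userspace can reach the call through syscall(2); a small hedged example (the /proc path and CLONE_NEWNET choice are purely illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>		/* CLONE_NEW* constants */
#include <stdio.h>
#include <sys/syscall.h>	/* __NR_setns, from updated headers */
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/1/ns/net", O_RDONLY);	/* illustrative target */

	if (fd < 0 || syscall(__NR_setns, fd, CLONE_NEWNET) < 0)
		perror("setns");
	return 0;
}
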
diff --git a/arch/m68k/Kconfig.nommu b/arch/m68k/Kconfig.nommu
index 273bccab9517..fc98f9b9d4d2 100644
--- a/arch/m68k/Kconfig.nommu
+++ b/arch/m68k/Kconfig.nommu
@@ -2,10 +2,6 @@ config FPU
2 bool 2 bool
3 default n 3 default n
4 4
5config GENERIC_FIND_NEXT_BIT
6 bool
7 default y
8
9config GENERIC_GPIO 5config GENERIC_GPIO
10 bool 6 bool
11 default n 7 default n
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h
index e9020f88a748..89cf5b814a4d 100644
--- a/arch/m68k/include/asm/bitops_mm.h
+++ b/arch/m68k/include/asm/bitops_mm.h
@@ -200,6 +200,7 @@ out:
200 res += ((long)p - (long)vaddr - 4) * 8; 200 res += ((long)p - (long)vaddr - 4) * 8;
201 return res < size ? res : size; 201 return res < size ? res : size;
202} 202}
203#define find_first_zero_bit find_first_zero_bit
203 204
204static inline int find_next_zero_bit(const unsigned long *vaddr, int size, 205static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
205 int offset) 206 int offset)
@@ -229,6 +230,7 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
229 /* No zero yet, search remaining full bytes for a zero */ 230 /* No zero yet, search remaining full bytes for a zero */
230 return offset + find_first_zero_bit(p, size - offset); 231 return offset + find_first_zero_bit(p, size - offset);
231} 232}
233#define find_next_zero_bit find_next_zero_bit
232 234
233static inline int find_first_bit(const unsigned long *vaddr, unsigned size) 235static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
234{ 236{
@@ -253,6 +255,7 @@ out:
253 res += ((long)p - (long)vaddr - 4) * 8; 255 res += ((long)p - (long)vaddr - 4) * 8;
254 return res < size ? res : size; 256 return res < size ? res : size;
255} 257}
258#define find_first_bit find_first_bit
256 259
257static inline int find_next_bit(const unsigned long *vaddr, int size, 260static inline int find_next_bit(const unsigned long *vaddr, int size,
258 int offset) 261 int offset)
@@ -282,6 +285,7 @@ static inline int find_next_bit(const unsigned long *vaddr, int size,
282 /* No one yet, search remaining full bytes for a one */ 285 /* No one yet, search remaining full bytes for a one */
283 return offset + find_first_bit(p, size - offset); 286 return offset + find_first_bit(p, size - offset);
284} 287}
288#define find_next_bit find_next_bit
285 289
286/* 290/*
287 * ffz = Find First Zero in word. Undefined if no zero exists, 291 * ffz = Find First Zero in word. Undefined if no zero exists,
@@ -398,6 +402,7 @@ out:
398 res += (p - addr) * 32; 402 res += (p - addr) * 32;
399 return res < size ? res : size; 403 return res < size ? res : size;
400} 404}
405#define find_first_zero_bit_le find_first_zero_bit_le
401 406
402static inline unsigned long find_next_zero_bit_le(const void *addr, 407static inline unsigned long find_next_zero_bit_le(const void *addr,
403 unsigned long size, unsigned long offset) 408 unsigned long size, unsigned long offset)
@@ -427,6 +432,7 @@ static inline unsigned long find_next_zero_bit_le(const void *addr,
427 /* No zero yet, search remaining full bytes for a zero */ 432 /* No zero yet, search remaining full bytes for a zero */
428 return offset + find_first_zero_bit_le(p, size - offset); 433 return offset + find_first_zero_bit_le(p, size - offset);
429} 434}
435#define find_next_zero_bit_le find_next_zero_bit_le
430 436
431static inline int find_first_bit_le(const void *vaddr, unsigned size) 437static inline int find_first_bit_le(const void *vaddr, unsigned size)
432{ 438{
@@ -451,6 +457,7 @@ out:
451 res += (p - addr) * 32; 457 res += (p - addr) * 32;
452 return res < size ? res : size; 458 return res < size ? res : size;
453} 459}
460#define find_first_bit_le find_first_bit_le
454 461
455static inline unsigned long find_next_bit_le(const void *addr, 462static inline unsigned long find_next_bit_le(const void *addr,
456 unsigned long size, unsigned long offset) 463 unsigned long size, unsigned long offset)
@@ -480,6 +487,7 @@ static inline unsigned long find_next_bit_le(const void *addr,
480 /* No set bit yet, search remaining full bytes for a set bit */ 487 /* No set bit yet, search remaining full bytes for a set bit */
481 return offset + find_first_bit_le(p, size - offset); 488 return offset + find_first_bit_le(p, size - offset);
482} 489}
490#define find_next_bit_le find_next_bit_le
483 491
484/* Bitmap functions for the ext2 filesystem. */ 492/* Bitmap functions for the ext2 filesystem. */
485 493
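The added "#define find_first_zero_bit find_first_zero_bit" style lines follow the guard convention for the bit-search helpers: an architecture that supplies its own inline version defines a self-named macro, and the generic code only provides the library fallback when that macro is absent. Roughly, paraphrasing the convention rather than quoting the generic header:

/* arch header: optimized inline plus self-named macro */
static inline unsigned long find_first_bit(const unsigned long *addr,
					   unsigned long size)
{
	/* arch-specific search ... */
	return size;	/* placeholder body for the sketch */
}
#define find_first_bit find_first_bit

/* generic side, roughly: only declare the fallback if the arch did not */
#ifndef find_first_bit
extern unsigned long find_first_bit(const unsigned long *addr,
				    unsigned long size);
#endif
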
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h
index 6b0e2d349f0e..72e85acdd7bd 100644
--- a/arch/m68k/include/asm/bitops_no.h
+++ b/arch/m68k/include/asm/bitops_no.h
@@ -319,6 +319,10 @@ found_first:
319found_middle: 319found_middle:
320 return result + ffz(__swab32(tmp)); 320 return result + ffz(__swab32(tmp));
321} 321}
322#define find_next_zero_bit_le find_next_zero_bit_le
323
324extern unsigned long find_next_bit_le(const void *addr,
325 unsigned long size, unsigned long offset);
322 326
323#endif /* __KERNEL__ */ 327#endif /* __KERNEL__ */
324 328
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index f3b649de2a1b..43f984e93970 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -349,10 +349,11 @@
349#define __NR_open_by_handle_at 341 349#define __NR_open_by_handle_at 341
350#define __NR_clock_adjtime 342 350#define __NR_clock_adjtime 342
351#define __NR_syncfs 343 351#define __NR_syncfs 343
352#define __NR_setns 344
352 353
353#ifdef __KERNEL__ 354#ifdef __KERNEL__
354 355
355#define NR_syscalls 344 356#define NR_syscalls 345
356 357
357#define __ARCH_WANT_IPC_PARSE_VERSION 358#define __ARCH_WANT_IPC_PARSE_VERSION
358#define __ARCH_WANT_OLD_READDIR 359#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 6f7b09122a00..00d1452f9571 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -364,4 +364,5 @@ ENTRY(sys_call_table)
364 .long sys_open_by_handle_at 364 .long sys_open_by_handle_at
365 .long sys_clock_adjtime 365 .long sys_clock_adjtime
366 .long sys_syncfs 366 .long sys_syncfs
367 .long sys_setns
367 368
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index eccdefe70d4e..e446bab2427b 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -33,12 +33,6 @@ config ARCH_HAS_ILOG2_U32
33config ARCH_HAS_ILOG2_U64 33config ARCH_HAS_ILOG2_U64
34 def_bool n 34 def_bool n
35 35
36config GENERIC_FIND_NEXT_BIT
37 def_bool y
38
39config GENERIC_FIND_BIT_LE
40 def_bool y
41
42config GENERIC_HWEIGHT 36config GENERIC_HWEIGHT
43 def_bool y 37 def_bool y
44 38
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 30edd61a6b8f..7d7092b917ac 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -390,8 +390,9 @@
390#define __NR_open_by_handle_at 372 390#define __NR_open_by_handle_at 372
391#define __NR_clock_adjtime 373 391#define __NR_clock_adjtime 373
392#define __NR_syncfs 374 392#define __NR_syncfs 374
393#define __NR_setns 375
393 394
394#define __NR_syscalls 375 395#define __NR_syscalls 376
395 396
396#ifdef __KERNEL__ 397#ifdef __KERNEL__
397#ifndef __ASSEMBLY__ 398#ifndef __ASSEMBLY__
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 00ee90f08343..b15cc219b1d9 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -130,7 +130,7 @@ void __init early_init_devtree(void *params)
130 * device-tree, including the platform type, initrd location and 130 * device-tree, including the platform type, initrd location and
131 * size, TCE reserve, and more ... 131 * size, TCE reserve, and more ...
132 */ 132 */
133 of_scan_flat_dt(early_init_dt_scan_chosen, NULL); 133 of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
134 134
135 /* Scan memory nodes and rebuild MEMBLOCKs */ 135 /* Scan memory nodes and rebuild MEMBLOCKs */
136 memblock_init(); 136 memblock_init();
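Passing cmd_line instead of NULL matches the reworked early_init_dt_scan_chosen(), which copies the /chosen bootargs into whatever buffer the architecture hands through the data pointer. A sketch of the contract only (buffer name varies per arch; COMMAND_LINE_SIZE assumed):

static void __init example_scan_chosen(char *cmd_line_buf)
{
	/* The flat-DT walker passes the buffer straight through as `data`. */
	of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line_buf);

	/* Inside the callback, in essence:
	 *	p = of_get_flat_dt_prop(node, "bootargs", &l);
	 *	if (p != NULL && l > 0)
	 *		strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
	 */
}
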
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 85cea81d1ca1..d915a122c865 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -379,3 +379,4 @@ ENTRY(sys_call_table)
379 .long sys_open_by_handle_at 379 .long sys_open_by_handle_at
380 .long sys_clock_adjtime 380 .long sys_clock_adjtime
381 .long sys_syncfs 381 .long sys_syncfs
382 .long sys_setns /* 375 */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index cef1a854487d..653da62d0682 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -821,14 +821,6 @@ config ARCH_SUPPORTS_OPROFILE
821 bool 821 bool
822 default y if !MIPS_MT_SMTC 822 default y if !MIPS_MT_SMTC
823 823
824config GENERIC_FIND_NEXT_BIT
825 bool
826 default y
827
828config GENERIC_FIND_BIT_LE
829 bool
830 default y
831
832config GENERIC_HWEIGHT 824config GENERIC_HWEIGHT
833 bool 825 bool
834 default y 826 default y
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index 008f657116eb..0ee02f5e51cc 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -16,7 +16,6 @@
16 16
17static struct map_info flash_map; 17static struct map_info flash_map;
18static struct mtd_info *mymtd; 18static struct mtd_info *mymtd;
19#ifdef CONFIG_MTD_PARTITIONS
20static int nr_parts; 19static int nr_parts;
21static struct mtd_partition *parts; 20static struct mtd_partition *parts;
22static const char *part_probe_types[] = { 21static const char *part_probe_types[] = {
@@ -26,7 +25,6 @@ static const char *part_probe_types[] = {
26#endif 25#endif
27 NULL 26 NULL
28}; 27};
29#endif
30 28
31/** 29/**
32 * Module/ driver initialization. 30 * Module/ driver initialization.
@@ -63,17 +61,10 @@ static int __init flash_init(void)
63 if (mymtd) { 61 if (mymtd) {
64 mymtd->owner = THIS_MODULE; 62 mymtd->owner = THIS_MODULE;
65 63
66#ifdef CONFIG_MTD_PARTITIONS
67 nr_parts = parse_mtd_partitions(mymtd, 64 nr_parts = parse_mtd_partitions(mymtd,
68 part_probe_types, 65 part_probe_types,
69 &parts, 0); 66 &parts, 0);
70 if (nr_parts > 0) 67 mtd_device_register(mymtd, parts, nr_parts);
71 add_mtd_partitions(mymtd, parts, nr_parts);
72 else
73 add_mtd_device(mymtd);
74#else
75 add_mtd_device(mymtd);
76#endif
77 } else { 68 } else {
78 pr_err("Failed to register MTD device for flash\n"); 69 pr_err("Failed to register MTD device for flash\n");
79 } 70 }
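The CONFIG_MTD_PARTITIONS conditionals disappear because mtd_device_register() now takes the (possibly empty) partition array directly and, in essence, registers the whole master device when no partitions were found, so callers no longer choose between add_mtd_partitions() and add_mtd_device(). A hedged sketch of the resulting idiom, reusing the names from the hunk:

static const char *part_probe_types[] = { "cmdlinepart", NULL };

static int example_register_flash(struct mtd_info *mtd)
{
	struct mtd_partition *parts = NULL;
	int nr_parts;

	nr_parts = parse_mtd_partitions(mtd, part_probe_types, &parts, 0);
	/* nr_parts <= 0 simply means "no partition table found"; the
	 * master device is then registered whole. */
	return mtd_device_register(mtd, parts, nr_parts);
}
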
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 22fdf2f0cc23..ad15fb10322b 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -16,7 +16,6 @@ CONFIG_TASK_IO_ACCOUNTING=y
16CONFIG_AUDIT=y 16CONFIG_AUDIT=y
17CONFIG_TINY_RCU=y 17CONFIG_TINY_RCU=y
18CONFIG_CGROUPS=y 18CONFIG_CGROUPS=y
19CONFIG_CGROUP_NS=y
20CONFIG_CGROUP_CPUACCT=y 19CONFIG_CGROUP_CPUACCT=y
21CONFIG_RELAY=y 20CONFIG_RELAY=y
22CONFIG_BLK_DEV_INITRD=y 21CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index f29b862d9db3..857d9b7858ad 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -14,9 +14,6 @@
14#ifdef CONFIG_OF 14#ifdef CONFIG_OF
15#include <asm/bootinfo.h> 15#include <asm/bootinfo.h>
16 16
17/* which is compatible with the flattened device tree (FDT) */
18#define cmd_line arcs_cmdline
19
20extern int early_init_dt_scan_memory_arch(unsigned long node, 17extern int early_init_dt_scan_memory_arch(unsigned long node,
21 const char *uname, int depth, void *data); 18 const char *uname, int depth, void *data);
22 19
diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
index 294cdb66c5fc..3adac3b53d19 100644
--- a/arch/mips/include/asm/suspend.h
+++ b/arch/mips/include/asm/suspend.h
@@ -1,8 +1,6 @@
1#ifndef __ASM_SUSPEND_H 1#ifndef __ASM_SUSPEND_H
2#define __ASM_SUSPEND_H 2#define __ASM_SUSPEND_H
3 3
4static inline int arch_prepare_suspend(void) { return 0; }
5
6/* References to section boundaries */ 4/* References to section boundaries */
7extern const void __nosave_begin, __nosave_end; 5extern const void __nosave_begin, __nosave_end;
8 6
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index fa2e37ea2be1..6fcfc480e9d0 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -363,16 +363,17 @@
363#define __NR_open_by_handle_at (__NR_Linux + 340) 363#define __NR_open_by_handle_at (__NR_Linux + 340)
364#define __NR_clock_adjtime (__NR_Linux + 341) 364#define __NR_clock_adjtime (__NR_Linux + 341)
365#define __NR_syncfs (__NR_Linux + 342) 365#define __NR_syncfs (__NR_Linux + 342)
366#define __NR_setns (__NR_Linux + 343)
366 367
367/* 368/*
368 * Offset of the last Linux o32 flavoured syscall 369 * Offset of the last Linux o32 flavoured syscall
369 */ 370 */
370#define __NR_Linux_syscalls 342 371#define __NR_Linux_syscalls 343
371 372
372#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 373#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
373 374
374#define __NR_O32_Linux 4000 375#define __NR_O32_Linux 4000
375#define __NR_O32_Linux_syscalls 342 376#define __NR_O32_Linux_syscalls 343
376 377
377#if _MIPS_SIM == _MIPS_SIM_ABI64 378#if _MIPS_SIM == _MIPS_SIM_ABI64
378 379
@@ -682,16 +683,17 @@
682#define __NR_open_by_handle_at (__NR_Linux + 299) 683#define __NR_open_by_handle_at (__NR_Linux + 299)
683#define __NR_clock_adjtime (__NR_Linux + 300) 684#define __NR_clock_adjtime (__NR_Linux + 300)
684#define __NR_syncfs (__NR_Linux + 301) 685#define __NR_syncfs (__NR_Linux + 301)
686#define __NR_setns (__NR_Linux + 302)
685 687
686/* 688/*
687 * Offset of the last Linux 64-bit flavoured syscall 689 * Offset of the last Linux 64-bit flavoured syscall
688 */ 690 */
689#define __NR_Linux_syscalls 301 691#define __NR_Linux_syscalls 302
690 692
691#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 693#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
692 694
693#define __NR_64_Linux 5000 695#define __NR_64_Linux 5000
694#define __NR_64_Linux_syscalls 301 696#define __NR_64_Linux_syscalls 302
695 697
696#if _MIPS_SIM == _MIPS_SIM_NABI32 698#if _MIPS_SIM == _MIPS_SIM_NABI32
697 699
@@ -1006,16 +1008,17 @@
1006#define __NR_open_by_handle_at (__NR_Linux + 304) 1008#define __NR_open_by_handle_at (__NR_Linux + 304)
1007#define __NR_clock_adjtime (__NR_Linux + 305) 1009#define __NR_clock_adjtime (__NR_Linux + 305)
1008#define __NR_syncfs (__NR_Linux + 306) 1010#define __NR_syncfs (__NR_Linux + 306)
1011#define __NR_setns (__NR_Linux + 307)
1009 1012
1010/* 1013/*
1011 * Offset of the last N32 flavoured syscall 1014 * Offset of the last N32 flavoured syscall
1012 */ 1015 */
1013#define __NR_Linux_syscalls 306 1016#define __NR_Linux_syscalls 307
1014 1017
1015#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1018#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1016 1019
1017#define __NR_N32_Linux 6000 1020#define __NR_N32_Linux 6000
1018#define __NR_N32_Linux_syscalls 306 1021#define __NR_N32_Linux_syscalls 307
1019 1022
1020#ifdef __KERNEL__ 1023#ifdef __KERNEL__
1021 1024
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index a19811e98a41..5b7eade41fa3 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -83,7 +83,8 @@ void __init early_init_devtree(void *params)
83 * device-tree, including the platform type, initrd location and 83 * device-tree, including the platform type, initrd location and
84 * size, and more ... 84 * size, and more ...
85 */ 85 */
86 of_scan_flat_dt(early_init_dt_scan_chosen, NULL); 86 of_scan_flat_dt(early_init_dt_scan_chosen, arcs_cmdline);
87
87 88
88 /* Scan memory nodes */ 89 /* Scan memory nodes */
89 of_scan_flat_dt(early_init_dt_scan_root, NULL); 90 of_scan_flat_dt(early_init_dt_scan_root, NULL);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 7a8e1dd7f6f2..99e656e425f3 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -589,6 +589,7 @@ einval: li v0, -ENOSYS
589 sys sys_open_by_handle_at 3 /* 4340 */ 589 sys sys_open_by_handle_at 3 /* 4340 */
590 sys sys_clock_adjtime 2 590 sys sys_clock_adjtime 2
591 sys sys_syncfs 1 591 sys sys_syncfs 1
592 sys sys_setns 2
592 .endm 593 .endm
593 594
594 /* We pre-compute the number of _instruction_ bytes needed to 595 /* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 2d31c83224f9..fb0575f47f3d 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -428,4 +428,5 @@ sys_call_table:
428 PTR sys_open_by_handle_at 428 PTR sys_open_by_handle_at
429 PTR sys_clock_adjtime /* 5300 */ 429 PTR sys_clock_adjtime /* 5300 */
430 PTR sys_syncfs 430 PTR sys_syncfs
431 PTR sys_setns
431 .size sys_call_table,.-sys_call_table 432 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 38a0503b9a4a..4de0c5534e73 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -428,4 +428,5 @@ EXPORT(sysn32_call_table)
428 PTR sys_open_by_handle_at 428 PTR sys_open_by_handle_at
429 PTR compat_sys_clock_adjtime /* 6305 */ 429 PTR compat_sys_clock_adjtime /* 6305 */
430 PTR sys_syncfs 430 PTR sys_syncfs
431 PTR sys_setns
431 .size sysn32_call_table,.-sysn32_call_table 432 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 91ea5e4041dd..4a387de08bfa 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -546,4 +546,5 @@ sys_call_table:
546 PTR compat_sys_open_by_handle_at /* 4340 */ 546 PTR compat_sys_open_by_handle_at /* 4340 */
547 PTR compat_sys_clock_adjtime 547 PTR compat_sys_clock_adjtime
548 PTR sys_syncfs 548 PTR sys_syncfs
549 PTR sys_setns
549 .size sys_call_table,.-sys_call_table 550 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 812816c45662..ec38e00b2559 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -639,7 +639,6 @@ void __init txx9_physmap_flash_init(int no, unsigned long addr,
639 .flags = IORESOURCE_MEM, 639 .flags = IORESOURCE_MEM,
640 }; 640 };
641 struct platform_device *pdev; 641 struct platform_device *pdev;
642#ifdef CONFIG_MTD_PARTITIONS
643 static struct mtd_partition parts[2]; 642 static struct mtd_partition parts[2];
644 struct physmap_flash_data pdata_part; 643 struct physmap_flash_data pdata_part;
645 644
@@ -658,7 +657,7 @@ void __init txx9_physmap_flash_init(int no, unsigned long addr,
658 pdata_part.parts = parts; 657 pdata_part.parts = parts;
659 pdata = &pdata_part; 658 pdata = &pdata_part;
660 } 659 }
661#endif 660
662 pdev = platform_device_alloc("physmap-flash", no); 661 pdev = platform_device_alloc("physmap-flash", no);
663 if (!pdev || 662 if (!pdev ||
664 platform_device_add_resources(pdev, &res, 1) || 663 platform_device_add_resources(pdev, &res, 1) ||
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index feaf09cc8632..1f870340ebdd 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -44,9 +44,6 @@ config GENERIC_CALIBRATE_DELAY
44config GENERIC_CMOS_UPDATE 44config GENERIC_CMOS_UPDATE
45 def_bool n 45 def_bool n
46 46
47config GENERIC_FIND_NEXT_BIT
48 def_bool y
49
50config GENERIC_HWEIGHT 47config GENERIC_HWEIGHT
51 def_bool y 48 def_bool y
52 49
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
index 31d76261a3d5..fbb96ae3122a 100644
--- a/arch/mn10300/configs/asb2364_defconfig
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -8,7 +8,6 @@ CONFIG_TASK_XACCT=y
8CONFIG_TASK_IO_ACCOUNTING=y 8CONFIG_TASK_IO_ACCOUNTING=y
9CONFIG_LOG_BUF_SHIFT=14 9CONFIG_LOG_BUF_SHIFT=14
10CONFIG_CGROUPS=y 10CONFIG_CGROUPS=y
11CONFIG_CGROUP_NS=y
12CONFIG_CGROUP_FREEZER=y 11CONFIG_CGROUP_FREEZER=y
13CONFIG_CGROUP_DEVICE=y 12CONFIG_CGROUP_DEVICE=y
14CONFIG_CGROUP_CPUACCT=y 13CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index 9d056f515929..9051f921cbc7 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -349,10 +349,11 @@
349#define __NR_rt_tgsigqueueinfo 336 349#define __NR_rt_tgsigqueueinfo 336
350#define __NR_perf_event_open 337 350#define __NR_perf_event_open 337
351#define __NR_recvmmsg 338 351#define __NR_recvmmsg 338
352#define __NR_setns 339
352 353
353#ifdef __KERNEL__ 354#ifdef __KERNEL__
354 355
355#define NR_syscalls 339 356#define NR_syscalls 340
356 357
357/* 358/*
358 * specify the deprecated syscalls we want to support on this arch 359 * specify the deprecated syscalls we want to support on this arch
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index fb93ad720b82..ae435e1d5669 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -759,6 +759,7 @@ ENTRY(sys_call_table)
759 .long sys_rt_tgsigqueueinfo 759 .long sys_rt_tgsigqueueinfo
760 .long sys_perf_event_open 760 .long sys_perf_event_open
761 .long sys_recvmmsg 761 .long sys_recvmmsg
762 .long sys_setns
762 763
763 764
764nr_syscalls=(.-sys_call_table)/4 765nr_syscalls=(.-sys_call_table)/4
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 69ff049c8571..65adc86a230e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -47,14 +47,6 @@ config ARCH_HAS_ILOG2_U64
47 bool 47 bool
48 default n 48 default n
49 49
50config GENERIC_FIND_NEXT_BIT
51 bool
52 default y
53
54config GENERIC_FIND_BIT_LE
55 bool
56 default y
57
58config GENERIC_BUG 50config GENERIC_BUG
59 bool 51 bool
60 default y 52 default y
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 9cbc2c3bf630..3392de3e7be0 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -820,8 +820,9 @@
820#define __NR_name_to_handle_at (__NR_Linux + 325) 820#define __NR_name_to_handle_at (__NR_Linux + 325)
821#define __NR_open_by_handle_at (__NR_Linux + 326) 821#define __NR_open_by_handle_at (__NR_Linux + 326)
822#define __NR_syncfs (__NR_Linux + 327) 822#define __NR_syncfs (__NR_Linux + 327)
823#define __NR_setns (__NR_Linux + 328)
823 824
824#define __NR_Linux_syscalls (__NR_syncfs + 1) 825#define __NR_Linux_syscalls (__NR_setns + 1)
825 826
826 827
827#define __IGNORE_select /* newselect */ 828#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index a5b02ce4d41e..34a4f5a2fffb 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -426,6 +426,7 @@
426 ENTRY_SAME(name_to_handle_at) /* 325 */ 426 ENTRY_SAME(name_to_handle_at) /* 325 */
427 ENTRY_COMP(open_by_handle_at) 427 ENTRY_COMP(open_by_handle_at)
428 ENTRY_SAME(syncfs) 428 ENTRY_SAME(syncfs)
429 ENTRY_SAME(setns)
429 430
430 /* Nothing yet */ 431 /* Nothing yet */
431 432
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2f6a22e8e935..2729c6663d8a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -91,14 +91,6 @@ config GENERIC_HWEIGHT
91 bool 91 bool
92 default y 92 default y
93 93
94config GENERIC_FIND_NEXT_BIT
95 bool
96 default y
97
98config GENERIC_FIND_BIT_LE
99 bool
100 default y
101
102config GENERIC_GPIO 94config GENERIC_GPIO
103 bool 95 bool
104 help 96 help
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 214208924a9c..04360f9b0109 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -10,7 +10,6 @@ CONFIG_TASK_XACCT=y
10CONFIG_TASK_IO_ACCOUNTING=y 10CONFIG_TASK_IO_ACCOUNTING=y
11CONFIG_AUDIT=y 11CONFIG_AUDIT=y
12CONFIG_CGROUPS=y 12CONFIG_CGROUPS=y
13CONFIG_CGROUP_NS=y
14CONFIG_CGROUP_DEVICE=y 13CONFIG_CGROUP_DEVICE=y
15CONFIG_CGROUP_CPUACCT=y 14CONFIG_CGROUP_CPUACCT=y
16CONFIG_RESOURCE_COUNTERS=y 15CONFIG_RESOURCE_COUNTERS=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 7de13865508c..c9f212b5f3de 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -15,7 +15,6 @@ CONFIG_AUDITSYSCALL=y
15CONFIG_IKCONFIG=y 15CONFIG_IKCONFIG=y
16CONFIG_IKCONFIG_PROC=y 16CONFIG_IKCONFIG_PROC=y
17CONFIG_CGROUPS=y 17CONFIG_CGROUPS=y
18CONFIG_CGROUP_NS=y
19CONFIG_CGROUP_FREEZER=y 18CONFIG_CGROUP_FREEZER=y
20CONFIG_CGROUP_DEVICE=y 19CONFIG_CGROUP_DEVICE=y
21CONFIG_CPUSETS=y 20CONFIG_CPUSETS=y
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 5c1bf3466749..8a0b5ece8f76 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -157,6 +157,8 @@ struct fsl_lbc_regs {
157#define LBCR_EPAR_SHIFT 16 157#define LBCR_EPAR_SHIFT 16
158#define LBCR_BMT 0x0000FF00 158#define LBCR_BMT 0x0000FF00
159#define LBCR_BMT_SHIFT 8 159#define LBCR_BMT_SHIFT 8
160#define LBCR_BMTPS 0x0000000F
161#define LBCR_BMTPS_SHIFT 0
160#define LBCR_INIT 0x00040000 162#define LBCR_INIT 0x00040000
161 __be32 lcrr; /**< Clock Ratio Register */ 163 __be32 lcrr; /**< Clock Ratio Register */
162#define LCRR_DBYP 0x80000000 164#define LCRR_DBYP 0x80000000
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
index 0018bf80cb25..d902abd33995 100644
--- a/arch/powerpc/include/asm/rio.h
+++ b/arch/powerpc/include/asm/rio.h
@@ -14,5 +14,10 @@
14#define ASM_PPC_RIO_H 14#define ASM_PPC_RIO_H
15 15
16extern void platform_rio_init(void); 16extern void platform_rio_init(void);
17#ifdef CONFIG_RAPIDIO
18extern int fsl_rio_mcheck_exception(struct pt_regs *);
19#else
20static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) {return 0; }
21#endif
17 22
18#endif /* ASM_PPC_RIO_H */ 23#endif /* ASM_PPC_RIO_H */
diff --git a/arch/powerpc/include/asm/suspend.h b/arch/powerpc/include/asm/suspend.h
deleted file mode 100644
index c6efc3466aa6..000000000000
--- a/arch/powerpc/include/asm/suspend.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_POWERPC_SUSPEND_H
2#define __ASM_POWERPC_SUSPEND_H
3
4static inline int arch_prepare_suspend(void) { return 0; }
5
6#endif /* __ASM_POWERPC_SUSPEND_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 8489d372077f..f6736b7da463 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -353,3 +353,4 @@ COMPAT_SYS_SPU(open_by_handle_at)
353COMPAT_SYS_SPU(clock_adjtime) 353COMPAT_SYS_SPU(clock_adjtime)
354SYSCALL_SPU(syncfs) 354SYSCALL_SPU(syncfs)
355COMPAT_SYS_SPU(sendmmsg) 355COMPAT_SYS_SPU(sendmmsg)
356SYSCALL_SPU(setns)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6d23c8193caa..b8b3f599362b 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -372,10 +372,11 @@
372#define __NR_clock_adjtime 347 372#define __NR_clock_adjtime 347
373#define __NR_syncfs 348 373#define __NR_syncfs 348
374#define __NR_sendmmsg 349 374#define __NR_sendmmsg 349
375#define __NR_setns 350
375 376
376#ifdef __KERNEL__ 377#ifdef __KERNEL__
377 378
378#define __NR_syscalls 350 379#define __NR_syscalls 351
379 380
380#define __NR__exit __NR_exit 381#define __NR__exit __NR_exit
381#define NR_syscalls __NR_syscalls 382#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 48aeb55faae9..f2c906b1d8d3 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -694,7 +694,7 @@ void __init early_init_devtree(void *params)
694 * device-tree, including the platform type, initrd location and 694 * device-tree, including the platform type, initrd location and
695 * size, TCE reserve, and more ... 695 * size, TCE reserve, and more ...
696 */ 696 */
697 of_scan_flat_dt(early_init_dt_scan_chosen_ppc, NULL); 697 of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);
698 698
699 /* Scan memory nodes and rebuild MEMBLOCKs */ 699 /* Scan memory nodes and rebuild MEMBLOCKs */
700 memblock_init(); 700 memblock_init();
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c
index 560c96119501..aa17b76dd427 100644
--- a/arch/powerpc/kernel/swsusp.c
+++ b/arch/powerpc/kernel/swsusp.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <asm/suspend.h>
14#include <asm/system.h> 13#include <asm/system.h>
15#include <asm/current.h> 14#include <asm/current.h>
16#include <asm/mmu_context.h> 15#include <asm/mmu_context.h>
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b13306b0d925..0ff4ab98d50c 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -55,6 +55,7 @@
55#endif 55#endif
56#include <asm/kexec.h> 56#include <asm/kexec.h>
57#include <asm/ppc-opcode.h> 57#include <asm/ppc-opcode.h>
58#include <asm/rio.h>
58 59
59#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 60#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
60int (*__debugger)(struct pt_regs *regs) __read_mostly; 61int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -424,6 +425,12 @@ int machine_check_e500mc(struct pt_regs *regs)
424 unsigned long reason = mcsr; 425 unsigned long reason = mcsr;
425 int recoverable = 1; 426 int recoverable = 1;
426 427
428 if (reason & MCSR_BUS_RBERR) {
429 recoverable = fsl_rio_mcheck_exception(regs);
430 if (recoverable == 1)
431 goto silent_out;
432 }
433
427 printk("Machine check in kernel mode.\n"); 434 printk("Machine check in kernel mode.\n");
428 printk("Caused by (from MCSR=%lx): ", reason); 435 printk("Caused by (from MCSR=%lx): ", reason);
429 436
@@ -499,6 +506,7 @@ int machine_check_e500mc(struct pt_regs *regs)
499 reason & MCSR_MEA ? "Effective" : "Physical", addr); 506 reason & MCSR_MEA ? "Effective" : "Physical", addr);
500 } 507 }
501 508
509silent_out:
502 mtspr(SPRN_MCSR, mcsr); 510 mtspr(SPRN_MCSR, mcsr);
503 return mfspr(SPRN_MCSR) == 0 && recoverable; 511 return mfspr(SPRN_MCSR) == 0 && recoverable;
504} 512}
@@ -507,6 +515,11 @@ int machine_check_e500(struct pt_regs *regs)
507{ 515{
508 unsigned long reason = get_mc_reason(regs); 516 unsigned long reason = get_mc_reason(regs);
509 517
518 if (reason & MCSR_BUS_RBERR) {
519 if (fsl_rio_mcheck_exception(regs))
520 return 1;
521 }
522
510 printk("Machine check in kernel mode.\n"); 523 printk("Machine check in kernel mode.\n");
511 printk("Caused by (from MCSR=%lx): ", reason); 524 printk("Caused by (from MCSR=%lx): ", reason);
512 525
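Instead of saving and chaining ppc_md.machine_check_exception (see the fsl_rio.c hunks further down), the e500 machine-check handlers now call the RapidIO hook directly for bus read errors; the <asm/rio.h> stub keeps the call site unconditional. Condensed from the hunk above, as a sketch rather than the full handler:

int example_machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		/* The stub returns 0 when CONFIG_RAPIDIO is off. */
		if (fsl_rio_mcheck_exception(regs))
			return 1;	/* recovered via the exception table */
	}

	/* ... usual MCSR decoding and reporting ... */
	return 0;
}
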
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 4fcb5a4e60dd..0608b1657da4 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -184,7 +184,8 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
184} 184}
185EXPORT_SYMBOL(fsl_upm_run_pattern); 185EXPORT_SYMBOL(fsl_upm_run_pattern);
186 186
187static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl) 187static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
188 struct device_node *node)
188{ 189{
189 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 190 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
190 191
@@ -198,6 +199,10 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl)
198 /* Enable interrupts for any detected events */ 199 /* Enable interrupts for any detected events */
199 out_be32(&lbc->lteir, LTEIR_ENABLE); 200 out_be32(&lbc->lteir, LTEIR_ENABLE);
200 201
202 /* Set the monitor timeout value to the maximum for erratum A001 */
203 if (of_device_is_compatible(node, "fsl,elbc"))
204 clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
205
201 return 0; 206 return 0;
202} 207}
203 208
@@ -304,7 +309,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
304 309
305 fsl_lbc_ctrl_dev->dev = &dev->dev; 310 fsl_lbc_ctrl_dev->dev = &dev->dev;
306 311
307 ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev); 312 ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node);
308 if (ret < 0) 313 if (ret < 0)
309 goto err; 314 goto err;
310 315
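The one functional addition in fsl_lbc.c is the erratum A001 workaround: on "fsl,elbc" controllers the bus-monitor timeout is forced to its maximum by clearing LBCR[BMT] and setting the LBCR[BMTPS] prescaler field in a single read-modify-write. clrsetbits_be32(addr, clear, set) is the stock powerpc accessor for that and behaves roughly like:

/* Rough expansion of clrsetbits_be32(), for reference only. */
static inline void example_clrsetbits_be32(u32 __iomem *addr, u32 clear, u32 set)
{
	out_be32(addr, (in_be32(addr) & ~clear) | set);
}

/* So the hunk's
 *	clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
 * zeroes the 0x0000FF00 timeout field and sets the 0x0000000F prescaler bits. */
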
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 49798532b477..5b206a2fe17c 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -10,7 +10,7 @@
10 * - Added Port-Write message handling 10 * - Added Port-Write message handling
11 * - Added Machine Check exception handling 11 * - Added Machine Check exception handling
12 * 12 *
13 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. 13 * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc.
14 * Zhang Wei <wei.zhang@freescale.com> 14 * Zhang Wei <wei.zhang@freescale.com>
15 * 15 *
16 * Copyright 2005 MontaVista Software, Inc. 16 * Copyright 2005 MontaVista Software, Inc.
@@ -47,15 +47,33 @@
47#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) 47#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq)
48#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) 48#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq)
49 49
50#define IPWSR_CLEAR 0x98
51#define OMSR_CLEAR 0x1cb3
52#define IMSR_CLEAR 0x491
53#define IDSR_CLEAR 0x91
54#define ODSR_CLEAR 0x1c00
55#define LTLEECSR_ENABLE_ALL 0xFFC000FC
56#define ESCSR_CLEAR 0x07120204
57
58#define RIO_PORT1_EDCSR 0x0640
59#define RIO_PORT2_EDCSR 0x0680
60#define RIO_PORT1_IECSR 0x10130
61#define RIO_PORT2_IECSR 0x101B0
62#define RIO_IM0SR 0x13064
63#define RIO_IM1SR 0x13164
64#define RIO_OM0SR 0x13004
65#define RIO_OM1SR 0x13104
66
50#define RIO_ATMU_REGS_OFFSET 0x10c00 67#define RIO_ATMU_REGS_OFFSET 0x10c00
51#define RIO_P_MSG_REGS_OFFSET 0x11000 68#define RIO_P_MSG_REGS_OFFSET 0x11000
52#define RIO_S_MSG_REGS_OFFSET 0x13000 69#define RIO_S_MSG_REGS_OFFSET 0x13000
53#define RIO_GCCSR 0x13c 70#define RIO_GCCSR 0x13c
54#define RIO_ESCSR 0x158 71#define RIO_ESCSR 0x158
72#define RIO_PORT2_ESCSR 0x178
55#define RIO_CCSR 0x15c 73#define RIO_CCSR 0x15c
56#define RIO_LTLEDCSR 0x0608 74#define RIO_LTLEDCSR 0x0608
57#define RIO_LTLEDCSR_IER 0x80000000 75#define RIO_LTLEDCSR_IER 0x80000000
58#define RIO_LTLEDCSR_PRT 0x01000000 76#define RIO_LTLEDCSR_PRT 0x01000000
59#define RIO_LTLEECSR 0x060c 77#define RIO_LTLEECSR 0x060c
60#define RIO_EPWISR 0x10010 78#define RIO_EPWISR 0x10010
61#define RIO_ISR_AACR 0x10120 79#define RIO_ISR_AACR 0x10120
@@ -88,7 +106,10 @@
88#define RIO_IPWSR_PWD 0x00000008 106#define RIO_IPWSR_PWD 0x00000008
89#define RIO_IPWSR_PWB 0x00000004 107#define RIO_IPWSR_PWB 0x00000004
90 108
91#define RIO_EPWISR_PINT 0x80000000 109/* EPWISR Error match value */
110#define RIO_EPWISR_PINT1 0x80000000
111#define RIO_EPWISR_PINT2 0x40000000
112#define RIO_EPWISR_MU 0x00000002
92#define RIO_EPWISR_PW 0x00000001 113#define RIO_EPWISR_PW 0x00000001
93 114
94#define RIO_MSG_DESC_SIZE 32 115#define RIO_MSG_DESC_SIZE 32
@@ -260,9 +281,7 @@ struct rio_priv {
260static void __iomem *rio_regs_win; 281static void __iomem *rio_regs_win;
261 282
262#ifdef CONFIG_E500 283#ifdef CONFIG_E500
263static int (*saved_mcheck_exception)(struct pt_regs *regs); 284int fsl_rio_mcheck_exception(struct pt_regs *regs)
264
265static int fsl_rio_mcheck_exception(struct pt_regs *regs)
266{ 285{
267 const struct exception_table_entry *entry = NULL; 286 const struct exception_table_entry *entry = NULL;
268 unsigned long reason = mfspr(SPRN_MCSR); 287 unsigned long reason = mfspr(SPRN_MCSR);
@@ -284,11 +303,9 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs)
284 } 303 }
285 } 304 }
286 305
287 if (saved_mcheck_exception) 306 return 0;
288 return saved_mcheck_exception(regs);
289 else
290 return cur_cpu_spec->machine_check(regs);
291} 307}
308EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception);
292#endif 309#endif
293 310
294/** 311/**
@@ -1064,6 +1081,40 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport)
1064 return rc; 1081 return rc;
1065} 1082}
1066 1083
1084static void port_error_handler(struct rio_mport *port, int offset)
1085{
1086 /*XXX: Error recovery is not implemented, we just clear errors */
1087 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
1088
1089 if (offset == 0) {
1090 out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0);
1091 out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0);
1092 out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR);
1093 } else {
1094 out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0);
1095 out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0);
1096 out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
1097 }
1098}
1099
1100static void msg_unit_error_handler(struct rio_mport *port)
1101{
1102 struct rio_priv *priv = port->priv;
1103
1104 /*XXX: Error recovery is not implemented, we just clear errors */
1105 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
1106
1107 out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR);
1108 out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR);
1109 out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR);
1110 out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR);
1111
1112 out_be32(&priv->msg_regs->odsr, ODSR_CLEAR);
1113 out_be32(&priv->msg_regs->dsr, IDSR_CLEAR);
1114
1115 out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR);
1116}
1117
1067/** 1118/**
1068 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler 1119 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
1069 * @irq: Linux interrupt number 1120 * @irq: Linux interrupt number
@@ -1144,10 +1195,22 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
1144 } 1195 }
1145 1196
1146pw_done: 1197pw_done:
1147 if (epwisr & RIO_EPWISR_PINT) { 1198 if (epwisr & RIO_EPWISR_PINT1) {
1199 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1200 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1201 port_error_handler(port, 0);
1202 }
1203
1204 if (epwisr & RIO_EPWISR_PINT2) {
1148 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); 1205 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1149 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); 1206 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1150 out_be32(priv->regs_win + RIO_LTLEDCSR, 0); 1207 port_error_handler(port, 1);
1208 }
1209
1210 if (epwisr & RIO_EPWISR_MU) {
1211 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1212 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1213 msg_unit_error_handler(port);
1151 } 1214 }
1152 1215
1153 return IRQ_HANDLED; 1216 return IRQ_HANDLED;
@@ -1258,12 +1321,14 @@ static int fsl_rio_port_write_init(struct rio_mport *mport)
1258 1321
1259 1322
1260 /* Hook up port-write handler */ 1323 /* Hook up port-write handler */
1261 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0, 1324 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler,
1262 "port-write", (void *)mport); 1325 IRQF_SHARED, "port-write", (void *)mport);
1263 if (rc < 0) { 1326 if (rc < 0) {
1264 pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); 1327 pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
1265 goto err_out; 1328 goto err_out;
1266 } 1329 }
1330 /* Enable Error Interrupt */
1331 out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
1267 1332
1268 INIT_WORK(&priv->pw_work, fsl_pw_dpc); 1333 INIT_WORK(&priv->pw_work, fsl_pw_dpc);
1269 spin_lock_init(&priv->pw_fifo_lock); 1334 spin_lock_init(&priv->pw_fifo_lock);
@@ -1538,11 +1603,6 @@ int fsl_rio_setup(struct platform_device *dev)
1538 fsl_rio_doorbell_init(port); 1603 fsl_rio_doorbell_init(port);
1539 fsl_rio_port_write_init(port); 1604 fsl_rio_port_write_init(port);
1540 1605
1541#ifdef CONFIG_E500
1542 saved_mcheck_exception = ppc_md.machine_check_exception;
1543 ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
1544#endif
1545
1546 return 0; 1606 return 0;
1547err: 1607err:
1548 iounmap(priv->regs_win); 1608 iounmap(priv->regs_win);
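Taken together, the fsl_rio.c changes (a) export fsl_rio_mcheck_exception() for the traps.c hook instead of patching ppc_md, (b) share the port-write IRQ and enable the logical-layer error interrupts, and (c) split EPWISR handling per source. The dispatch added to fsl_rio_port_write_handler() boils down to the following sketch (error handling is clear-only, as the XXX comments in the hunk note):

	if (epwisr & RIO_EPWISR_PINT1)
		port_error_handler(port, 0);	/* port 1: clear EDCSR/IECSR/ESCSR */
	if (epwisr & RIO_EPWISR_PINT2)
		port_error_handler(port, 1);	/* port 2: same, second register set */
	if (epwisr & RIO_EPWISR_MU)
		msg_unit_error_handler(port);	/* message unit: clear IMnSR/OMnSR/ODSR/IDSR/IPWSR */
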
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ff2d2371b2e9..9fab2aa9c2c8 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -2,7 +2,7 @@ config MMU
2 def_bool y 2 def_bool y
3 3
4config ZONE_DMA 4config ZONE_DMA
5 def_bool y if 64BIT 5 def_bool y
6 6
7config LOCKDEP_SUPPORT 7config LOCKDEP_SUPPORT
8 def_bool y 8 def_bool y
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index e43fe7537031..f7d3dc555bdb 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -92,9 +92,7 @@ static void appldata_get_mem_data(void *data)
92 mem_data->pswpin = ev[PSWPIN]; 92 mem_data->pswpin = ev[PSWPIN];
93 mem_data->pswpout = ev[PSWPOUT]; 93 mem_data->pswpout = ev[PSWPOUT];
94 mem_data->pgalloc = ev[PGALLOC_NORMAL]; 94 mem_data->pgalloc = ev[PGALLOC_NORMAL];
95#ifdef CONFIG_ZONE_DMA
96 mem_data->pgalloc += ev[PGALLOC_DMA]; 95 mem_data->pgalloc += ev[PGALLOC_DMA];
97#endif
98 mem_data->pgfault = ev[PGFAULT]; 96 mem_data->pgfault = ev[PGFAULT];
99 mem_data->pgmajfault = ev[PGMAJFAULT]; 97 mem_data->pgmajfault = ev[PGMAJFAULT];
100 98
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index e1c8f3a49884..667c6e9f6a34 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -621,6 +621,7 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr,
621 bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes)); 621 bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
622 return (bits < size) ? bits : size; 622 return (bits < size) ? bits : size;
623} 623}
624#define find_first_zero_bit find_first_zero_bit
624 625
625/** 626/**
626 * find_first_bit - find the first set bit in a memory region 627 * find_first_bit - find the first set bit in a memory region
@@ -641,6 +642,7 @@ static inline unsigned long find_first_bit(const unsigned long * addr,
641 bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes)); 642 bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
642 return (bits < size) ? bits : size; 643 return (bits < size) ? bits : size;
643} 644}
645#define find_first_bit find_first_bit
644 646
645/** 647/**
646 * find_next_zero_bit - find the first zero bit in a memory region 648 * find_next_zero_bit - find the first zero bit in a memory region
@@ -677,6 +679,7 @@ static inline int find_next_zero_bit (const unsigned long * addr,
677 } 679 }
678 return offset + find_first_zero_bit(p, size); 680 return offset + find_first_zero_bit(p, size);
679} 681}
682#define find_next_zero_bit find_next_zero_bit
680 683
681/** 684/**
682 * find_next_bit - find the first set bit in a memory region 685 * find_next_bit - find the first set bit in a memory region
@@ -713,6 +716,7 @@ static inline int find_next_bit (const unsigned long * addr,
713 } 716 }
714 return offset + find_first_bit(p, size); 717 return offset + find_first_bit(p, size);
715} 718}
719#define find_next_bit find_next_bit
716 720
717/* 721/*
718 * Every architecture must define this function. It's the fastest 722 * Every architecture must define this function. It's the fastest
@@ -742,41 +746,6 @@ static inline int sched_find_first_bit(unsigned long *b)
742 * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24 746 * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
743 */ 747 */
744 748
745static inline void __set_bit_le(unsigned long nr, void *addr)
746{
747 __set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
748}
749
750static inline void __clear_bit_le(unsigned long nr, void *addr)
751{
752 __clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
753}
754
755static inline int __test_and_set_bit_le(unsigned long nr, void *addr)
756{
757 return __test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
758}
759
760static inline int test_and_set_bit_le(unsigned long nr, void *addr)
761{
762 return test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
763}
764
765static inline int __test_and_clear_bit_le(unsigned long nr, void *addr)
766{
767 return __test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
768}
769
770static inline int test_and_clear_bit_le(unsigned long nr, void *addr)
771{
772 return test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
773}
774
775static inline int test_bit_le(unsigned long nr, const void *addr)
776{
777 return test_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
778}
779
780static inline int find_first_zero_bit_le(void *vaddr, unsigned int size) 749static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
781{ 750{
782 unsigned long bytes, bits; 751 unsigned long bytes, bits;
@@ -787,6 +756,7 @@ static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
787 bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes)); 756 bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
788 return (bits < size) ? bits : size; 757 return (bits < size) ? bits : size;
789} 758}
759#define find_first_zero_bit_le find_first_zero_bit_le
790 760
791static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, 761static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
792 unsigned long offset) 762 unsigned long offset)
@@ -816,6 +786,7 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
816 } 786 }
817 return offset + find_first_zero_bit_le(p, size); 787 return offset + find_first_zero_bit_le(p, size);
818} 788}
789#define find_next_zero_bit_le find_next_zero_bit_le
819 790
820static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size) 791static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
821{ 792{
@@ -827,6 +798,7 @@ static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
827 bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes)); 798 bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
828 return (bits < size) ? bits : size; 799 return (bits < size) ? bits : size;
829} 800}
801#define find_first_bit_le find_first_bit_le
830 802
831static inline int find_next_bit_le(void *vaddr, unsigned long size, 803static inline int find_next_bit_le(void *vaddr, unsigned long size,
832 unsigned long offset) 804 unsigned long offset)
@@ -856,6 +828,9 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
856 } 828 }
857 return offset + find_first_bit_le(p, size); 829 return offset + find_first_bit_le(p, size);
858} 830}
831#define find_next_bit_le find_next_bit_le
832
833#include <asm-generic/bitops/le.h>
859 834
860#define ext2_set_bit_atomic(lock, nr, addr) \ 835#define ext2_set_bit_atomic(lock, nr, addr) \
861 test_and_set_bit_le(nr, addr) 836 test_and_set_bit_le(nr, addr)
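The hand-rolled *_le bit helpers can go because <asm-generic/bitops/le.h>, now included at the end of the s390 header, supplies them on big-endian machines in terms of the regular bit operations plus an address swizzle (the same nr ^ 56 transformation the removed code open-coded). Paraphrased shape of what the generic header provides:

/* Big-endian case of <asm-generic/bitops/le.h>, paraphrased. */
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG - 1) & ~0x7)

static inline int test_bit_le(int nr, const void *addr)
{
	return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __set_bit_le(int nr, void *addr)
{
	__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
/* ...and likewise for the other test/set/clear variants the hunk removes. */
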
diff --git a/arch/s390/include/asm/delay.h b/arch/s390/include/asm/delay.h
index 8a096b83f51f..0e3b35f96be1 100644
--- a/arch/s390/include/asm/delay.h
+++ b/arch/s390/include/asm/delay.h
@@ -14,10 +14,12 @@
14#ifndef _S390_DELAY_H 14#ifndef _S390_DELAY_H
15#define _S390_DELAY_H 15#define _S390_DELAY_H
16 16
17extern void __udelay(unsigned long long usecs); 17void __ndelay(unsigned long long nsecs);
18extern void udelay_simple(unsigned long long usecs); 18void __udelay(unsigned long long usecs);
19extern void __delay(unsigned long loops); 19void udelay_simple(unsigned long long usecs);
20void __delay(unsigned long loops);
20 21
22#define ndelay(n) __ndelay((unsigned long long) (n))
21#define udelay(n) __udelay((unsigned long long) (n)) 23#define udelay(n) __udelay((unsigned long long) (n))
22#define mdelay(n) __udelay((unsigned long long) (n) * 1000) 24#define mdelay(n) __udelay((unsigned long long) (n) * 1000)
23 25
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 1544b90bd6d6..ba7b01c726a3 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -2,6 +2,7 @@
2#define _ASM_IRQ_H 2#define _ASM_IRQ_H
3 3
4#include <linux/hardirq.h> 4#include <linux/hardirq.h>
5#include <linux/types.h>
5 6
6enum interruption_class { 7enum interruption_class {
7 EXTERNAL_INTERRUPT, 8 EXTERNAL_INTERRUPT,
@@ -31,4 +32,11 @@ enum interruption_class {
31 NR_IRQS, 32 NR_IRQS,
32}; 33};
33 34
35typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
36
37int register_external_interrupt(u16 code, ext_int_handler_t handler);
38int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
39void service_subclass_irq_register(void);
40void service_subclass_irq_unregister(void);
41
34#endif /* _ASM_IRQ_H */ 42#endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h
deleted file mode 100644
index 080876d5f196..000000000000
--- a/arch/s390/include/asm/s390_ext.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * Copyright IBM Corp. 1999,2010
3 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
5 */
6
7#ifndef _S390_EXTINT_H
8#define _S390_EXTINT_H
9
10#include <linux/types.h>
11
12typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
13
14int register_external_interrupt(__u16 code, ext_int_handler_t handler);
15int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
16
17#endif /* _S390_EXTINT_H */
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
deleted file mode 100644
index dc75c616eafe..000000000000
--- a/arch/s390/include/asm/suspend.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef __ASM_S390_SUSPEND_H
2#define __ASM_S390_SUSPEND_H
3
4static inline int arch_prepare_suspend(void)
5{
6 return 0;
7}
8
9#endif
10
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index c5338834ddbd..005d77d8ae2a 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -7,7 +7,7 @@
7extern unsigned char cpu_core_id[NR_CPUS]; 7extern unsigned char cpu_core_id[NR_CPUS];
8extern cpumask_t cpu_core_map[NR_CPUS]; 8extern cpumask_t cpu_core_map[NR_CPUS];
9 9
10static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu) 10static inline const struct cpumask *cpu_coregroup_mask(int cpu)
11{ 11{
12 return &cpu_core_map[cpu]; 12 return &cpu_core_map[cpu];
13} 13}
@@ -21,7 +21,7 @@ static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
21extern unsigned char cpu_book_id[NR_CPUS]; 21extern unsigned char cpu_book_id[NR_CPUS];
22extern cpumask_t cpu_book_map[NR_CPUS]; 22extern cpumask_t cpu_book_map[NR_CPUS];
23 23
24static inline const struct cpumask *cpu_book_mask(unsigned int cpu) 24static inline const struct cpumask *cpu_book_mask(int cpu)
25{ 25{
26 return &cpu_book_map[cpu]; 26 return &cpu_book_map[cpu];
27} 27}
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 2d9ea11f919a..2b23885e81e9 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -49,12 +49,13 @@
49 49
50#define segment_eq(a,b) ((a).ar4 == (b).ar4) 50#define segment_eq(a,b) ((a).ar4 == (b).ar4)
51 51
52#define __access_ok(addr, size) \
53({ \
54 __chk_user_ptr(addr); \
55 1; \
56})
52 57
53static inline int __access_ok(const void __user *addr, unsigned long size) 58#define access_ok(type, addr, size) __access_ok(addr, size)
54{
55 return 1;
56}
57#define access_ok(type,addr,size) __access_ok(addr,size)
58 59
59/* 60/*
60 * The exception table consists of pairs of addresses: the first is the 61 * The exception table consists of pairs of addresses: the first is the
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 9208e69245a0..404bdb9671b4 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -276,7 +276,8 @@
276#define __NR_open_by_handle_at 336 276#define __NR_open_by_handle_at 336
277#define __NR_clock_adjtime 337 277#define __NR_clock_adjtime 337
278#define __NR_syncfs 338 278#define __NR_syncfs 338
279#define NR_syscalls 339 279#define __NR_setns 339
280#define NR_syscalls 340
280 281
281/* 282/*
282 * There are some system calls that are not present on 64 bit, some 283 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 5ff15dacb571..df3732249baa 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -20,10 +20,10 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
20 20
21CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 21CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
22 22
23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ 23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
25 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ 25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
26 vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o 26 sysinfo.o jump_label.o
27 27
28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 1dc96ea08fa8..1f5eb789c3a7 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1904,3 +1904,9 @@ compat_sys_clock_adjtime_wrapper:
1904sys_syncfs_wrapper: 1904sys_syncfs_wrapper:
1905 lgfr %r2,%r2 # int 1905 lgfr %r2,%r2 # int
1906 jg sys_syncfs 1906 jg sys_syncfs
1907
1908 .globl sys_setns_wrapper
1909sys_setns_wrapper:
1910 lgfr %r2,%r2 # int
1911 lgfr %r3,%r3 # int
1912 jg sys_setns
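Editor's aside on the wrapper above: each lgfr widens one 32-bit int argument from a compat caller into a full 64-bit register before jumping to sys_setns, and the widening is a sign extension, not a zero extension. A user-space sketch of the equivalent C conversion; the widen() helper is illustrative only and not part of the kernel.

/* Standalone sketch: the C-level effect of the two "lgfr" instructions in
 * sys_setns_wrapper -- sign-extending the compat task's int arguments. */
#include <stdio.h>

static long widen(int arg)		/* stands in for one lgfr */
{
	return (long)arg;		/* sign extension, not zero extension */
}

int main(void)
{
	int fd = -1;			/* e.g. an error value passed through */

	printf("zero-extended: %#lx\n", (unsigned long)(unsigned int)fd);
	printf("sign-extended: %#lx\n", (unsigned long)widen(fd));
	return 0;
}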
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 3d4a78fc1adc..1ca3d1d6a86c 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -30,9 +30,9 @@
30#include <asm/atomic.h> 30#include <asm/atomic.h>
31#include <asm/mathemu.h> 31#include <asm/mathemu.h>
32#include <asm/cpcmd.h> 32#include <asm/cpcmd.h>
33#include <asm/s390_ext.h>
34#include <asm/lowcore.h> 33#include <asm/lowcore.h>
35#include <asm/debug.h> 34#include <asm/debug.h>
35#include <asm/irq.h>
36 36
37#ifndef CONFIG_64BIT 37#ifndef CONFIG_64BIT
38#define ONELONG "%08lx: " 38#define ONELONG "%08lx: "
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e204f9597aaf..e3264f6a9720 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,19 +1,28 @@
1/* 1/*
2 * Copyright IBM Corp. 2004,2010 2 * Copyright IBM Corp. 2004,2011
3 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
4 * Thomas Spatzier (tspat@de.ibm.com) 4 * Holger Smolinski <Holger.Smolinski@de.ibm.com>,
5 * Thomas Spatzier <tspat@de.ibm.com>,
5 * 6 *
6 * This file contains interrupt related functions. 7 * This file contains interrupt related functions.
7 */ 8 */
8 9
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/kernel_stat.h> 10#include <linux/kernel_stat.h>
12#include <linux/interrupt.h> 11#include <linux/interrupt.h>
13#include <linux/seq_file.h> 12#include <linux/seq_file.h>
14#include <linux/cpu.h>
15#include <linux/proc_fs.h> 13#include <linux/proc_fs.h>
16#include <linux/profile.h> 14#include <linux/profile.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/ftrace.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/cpu.h>
21#include <asm/irq_regs.h>
22#include <asm/cputime.h>
23#include <asm/lowcore.h>
24#include <asm/irq.h>
25#include "entry.h"
17 26
18struct irq_class { 27struct irq_class {
19 char *name; 28 char *name;
@@ -82,8 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
 82 * For compatibility only. S/390 specific setup of interrupts et al. is done 91 * For compatibility only. S/390 specific setup of interrupts et al. is done
83 * much later in init_channel_subsystem(). 92 * much later in init_channel_subsystem().
84 */ 93 */
85void __init 94void __init init_IRQ(void)
86init_IRQ(void)
87{ 95{
88 /* nothing... */ 96 /* nothing... */
89} 97}
@@ -134,3 +142,116 @@ void init_irq_proc(void)
134 create_prof_cpu_mask(root_irq_dir); 142 create_prof_cpu_mask(root_irq_dir);
135} 143}
136#endif 144#endif
145
146/*
147 * ext_int_hash[index] is the start of the list for all external interrupts
148 * that hash to this index. With the current set of external interrupts
149 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
150 * iucv and 0x2603 pfault) this is always the first element.
151 */
152
153struct ext_int_info {
154 struct ext_int_info *next;
155 ext_int_handler_t handler;
156 u16 code;
157};
158
159static struct ext_int_info *ext_int_hash[256];
160
161static inline int ext_hash(u16 code)
162{
163 return (code + (code >> 9)) & 0xff;
164}
165
166int register_external_interrupt(u16 code, ext_int_handler_t handler)
167{
168 struct ext_int_info *p;
169 int index;
170
171 p = kmalloc(sizeof(*p), GFP_ATOMIC);
172 if (!p)
173 return -ENOMEM;
174 p->code = code;
175 p->handler = handler;
176 index = ext_hash(code);
177 p->next = ext_int_hash[index];
178 ext_int_hash[index] = p;
179 return 0;
180}
181EXPORT_SYMBOL(register_external_interrupt);
182
183int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
184{
185 struct ext_int_info *p, *q;
186 int index;
187
188 index = ext_hash(code);
189 q = NULL;
190 p = ext_int_hash[index];
191 while (p) {
192 if (p->code == code && p->handler == handler)
193 break;
194 q = p;
195 p = p->next;
196 }
197 if (!p)
198 return -ENOENT;
199 if (q)
200 q->next = p->next;
201 else
202 ext_int_hash[index] = p->next;
203 kfree(p);
204 return 0;
205}
206EXPORT_SYMBOL(unregister_external_interrupt);
207
208void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
209 unsigned int param32, unsigned long param64)
210{
211 struct pt_regs *old_regs;
212 unsigned short code;
213 struct ext_int_info *p;
214 int index;
215
216 code = (unsigned short) ext_int_code;
217 old_regs = set_irq_regs(regs);
218 s390_idle_check(regs, S390_lowcore.int_clock,
219 S390_lowcore.async_enter_timer);
220 irq_enter();
221 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
222 /* Serve timer interrupts first. */
223 clock_comparator_work();
224 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
225 if (code != 0x1004)
226 __get_cpu_var(s390_idle).nohz_delay = 1;
227 index = ext_hash(code);
228 for (p = ext_int_hash[index]; p; p = p->next) {
229 if (likely(p->code == code))
230 p->handler(ext_int_code, param32, param64);
231 }
232 irq_exit();
233 set_irq_regs(old_regs);
234}
235
236static DEFINE_SPINLOCK(sc_irq_lock);
237static int sc_irq_refcount;
238
239void service_subclass_irq_register(void)
240{
241 spin_lock(&sc_irq_lock);
242 if (!sc_irq_refcount)
243 ctl_set_bit(0, 9);
244 sc_irq_refcount++;
245 spin_unlock(&sc_irq_lock);
246}
247EXPORT_SYMBOL(service_subclass_irq_register);
248
249void service_subclass_irq_unregister(void)
250{
251 spin_lock(&sc_irq_lock);
252 sc_irq_refcount--;
253 if (!sc_irq_refcount)
254 ctl_clear_bit(0, 9);
255 spin_unlock(&sc_irq_lock);
256}
257EXPORT_SYMBOL(service_subclass_irq_unregister);
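Editor's aside on the irq.c hunk above: the comment claims that with the current external-interrupt codes each hash bucket holds a single handler. A standalone sketch that reuses the ext_hash() folding from the hunk on exactly those codes shows they do land in distinct buckets; the program below is a demonstration, not kernel code.

/* Standalone sketch: apply the ext_hash() folding from the hunk above to the
 * external-interrupt codes named in its comment and flag any collision. */
#include <stdint.h>
#include <stdio.h>

static int ext_hash(uint16_t code)
{
	return (code + (code >> 9)) & 0xff;
}

int main(void)
{
	const uint16_t codes[] = { 0x1202, 0x1004, 0x2401, 0x4000, 0x2603 };
	int seen[256] = { 0 };

	for (unsigned i = 0; i < sizeof(codes) / sizeof(codes[0]); i++) {
		int h = ext_hash(codes[i]);

		printf("0x%04x -> bucket %d%s\n", codes[i], h,
		       seen[h] ? " (collision)" : "");
		seen[h] = 1;
	}
	return 0;
}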
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
deleted file mode 100644
index 185029919c4d..000000000000
--- a/arch/s390/kernel/s390_ext.c
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * Copyright IBM Corp. 1999,2010
3 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
5 */
6
7#include <linux/kernel_stat.h>
8#include <linux/interrupt.h>
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/ftrace.h>
12#include <linux/errno.h>
13#include <linux/slab.h>
14#include <asm/s390_ext.h>
15#include <asm/irq_regs.h>
16#include <asm/cputime.h>
17#include <asm/lowcore.h>
18#include <asm/irq.h>
19#include "entry.h"
20
21struct ext_int_info {
22 struct ext_int_info *next;
23 ext_int_handler_t handler;
24 __u16 code;
25};
26
27/*
28 * ext_int_hash[index] is the start of the list for all external interrupts
29 * that hash to this index. With the current set of external interrupts
30 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
31 * iucv and 0x2603 pfault) this is always the first element.
32 */
33static struct ext_int_info *ext_int_hash[256];
34
35static inline int ext_hash(__u16 code)
36{
37 return (code + (code >> 9)) & 0xff;
38}
39
40int register_external_interrupt(__u16 code, ext_int_handler_t handler)
41{
42 struct ext_int_info *p;
43 int index;
44
45 p = kmalloc(sizeof(*p), GFP_ATOMIC);
46 if (!p)
47 return -ENOMEM;
48 p->code = code;
49 p->handler = handler;
50 index = ext_hash(code);
51 p->next = ext_int_hash[index];
52 ext_int_hash[index] = p;
53 return 0;
54}
55EXPORT_SYMBOL(register_external_interrupt);
56
57int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
58{
59 struct ext_int_info *p, *q;
60 int index;
61
62 index = ext_hash(code);
63 q = NULL;
64 p = ext_int_hash[index];
65 while (p) {
66 if (p->code == code && p->handler == handler)
67 break;
68 q = p;
69 p = p->next;
70 }
71 if (!p)
72 return -ENOENT;
73 if (q)
74 q->next = p->next;
75 else
76 ext_int_hash[index] = p->next;
77 kfree(p);
78 return 0;
79}
80EXPORT_SYMBOL(unregister_external_interrupt);
81
82void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
83 unsigned int param32, unsigned long param64)
84{
85 struct pt_regs *old_regs;
86 unsigned short code;
87 struct ext_int_info *p;
88 int index;
89
90 code = (unsigned short) ext_int_code;
91 old_regs = set_irq_regs(regs);
92 s390_idle_check(regs, S390_lowcore.int_clock,
93 S390_lowcore.async_enter_timer);
94 irq_enter();
95 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
96 /* Serve timer interrupts first. */
97 clock_comparator_work();
98 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
99 if (code != 0x1004)
100 __get_cpu_var(s390_idle).nohz_delay = 1;
101 index = ext_hash(code);
102 for (p = ext_int_hash[index]; p; p = p->next) {
103 if (likely(p->code == code))
104 p->handler(ext_int_code, param32, param64);
105 }
106 irq_exit();
107 set_irq_regs(old_regs);
108}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f8e85ecbc459..52420d2785b3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -44,7 +44,6 @@
44#include <asm/sigp.h> 44#include <asm/sigp.h>
45#include <asm/pgalloc.h> 45#include <asm/pgalloc.h>
46#include <asm/irq.h> 46#include <asm/irq.h>
47#include <asm/s390_ext.h>
48#include <asm/cpcmd.h> 47#include <asm/cpcmd.h>
49#include <asm/tlbflush.h> 48#include <asm/tlbflush.h>
50#include <asm/timer.h> 49#include <asm/timer.h>
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9c65fd4ddce0..6ee39ef8fe4a 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -347,3 +347,4 @@ SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrappe
347SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at_wrapper) 347SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at_wrapper)
348SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper) 348SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
349SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper) 349SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a59557f1fb5f..dff933065ab6 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -41,7 +41,6 @@
41#include <linux/kprobes.h> 41#include <linux/kprobes.h>
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#include <asm/delay.h> 43#include <asm/delay.h>
44#include <asm/s390_ext.h>
45#include <asm/div64.h> 44#include <asm/div64.h>
46#include <asm/vdso.h> 45#include <asm/vdso.h>
47#include <asm/irq.h> 46#include <asm/irq.h>
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 2eafb8c7a746..0cd340b72632 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -17,7 +17,6 @@
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/cpuset.h> 18#include <linux/cpuset.h>
19#include <asm/delay.h> 19#include <asm/delay.h>
20#include <asm/s390_ext.h>
21 20
22#define PTF_HORIZONTAL (0UL) 21#define PTF_HORIZONTAL (0UL)
23#define PTF_VERTICAL (1UL) 22#define PTF_VERTICAL (1UL)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index b5a4a739b477..a65d2e82f61d 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -39,7 +39,6 @@
39#include <asm/atomic.h> 39#include <asm/atomic.h>
40#include <asm/mathemu.h> 40#include <asm/mathemu.h>
41#include <asm/cpcmd.h> 41#include <asm/cpcmd.h>
42#include <asm/s390_ext.h>
43#include <asm/lowcore.h> 42#include <asm/lowcore.h>
44#include <asm/debug.h> 43#include <asm/debug.h>
45#include "entry.h" 44#include "entry.h"
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 5e8ead4b4aba..2d6228f60cd6 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -22,10 +22,10 @@
22#include <linux/cpu.h> 22#include <linux/cpu.h>
23#include <linux/kprobes.h> 23#include <linux/kprobes.h>
24 24
25#include <asm/s390_ext.h>
26#include <asm/timer.h> 25#include <asm/timer.h>
27#include <asm/irq_regs.h> 26#include <asm/irq_regs.h>
28#include <asm/cputime.h> 27#include <asm/cputime.h>
28#include <asm/irq.h>
29 29
30static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 30static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
31 31
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 0f53110e1d09..a65229d91c92 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/irqflags.h> 13#include <linux/irqflags.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <asm/div64.h>
15 16
16void __delay(unsigned long loops) 17void __delay(unsigned long loops)
17{ 18{
@@ -116,3 +117,17 @@ void udelay_simple(unsigned long long usecs)
116 while (get_clock() < end) 117 while (get_clock() < end)
117 cpu_relax(); 118 cpu_relax();
118} 119}
120
121void __ndelay(unsigned long long nsecs)
122{
123 u64 end;
124
125 nsecs <<= 9;
126 do_div(nsecs, 125);
127 end = get_clock() + nsecs;
128 if (nsecs & ~0xfffUL)
129 __udelay(nsecs >> 12);
130 while (get_clock() < end)
131 barrier();
132}
133EXPORT_SYMBOL(__ndelay);
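Editor's aside on the new __ndelay() above: assuming one TOD clock unit as returned by get_clock() is 2^-12 microseconds, converting nanoseconds to clock units means multiplying by 4096/1000, which is exactly the "shift left by 9 then divide by 125" in the hunk; on that reading the nsecs >> 12 branch reuses __udelay() once the delay reaches a full microsecond. A standalone sketch of the scaling, with the 2^-12 microsecond resolution treated as an assumption:

/* Standalone sketch: the nanosecond-to-clock-unit scaling __ndelay() applies
 * before comparing against get_clock().  The expected value is nsecs * 4.096
 * under the assumed TOD resolution. */
#include <stdio.h>

static unsigned long long ns_to_tod(unsigned long long nsecs)
{
	nsecs <<= 9;		/* * 512 */
	return nsecs / 125;	/* / 125  ->  * 4.096 overall */
}

int main(void)
{
	unsigned long long samples[] = { 1, 250, 1000, 1000000 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%llu ns -> %llu clock units (%.3f expected)\n",
		       samples[i], ns_to_tod(samples[i]),
		       samples[i] * 4096.0 / 1000.0);
	return 0;
}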
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a0f9e730f26a..fe103e891e7a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -34,7 +34,7 @@
34#include <asm/asm-offsets.h> 34#include <asm/asm-offsets.h>
35#include <asm/system.h> 35#include <asm/system.h>
36#include <asm/pgtable.h> 36#include <asm/pgtable.h>
37#include <asm/s390_ext.h> 37#include <asm/irq.h>
38#include <asm/mmu_context.h> 38#include <asm/mmu_context.h>
39#include <asm/compat.h> 39#include <asm/compat.h>
40#include "../kernel/entry.h" 40#include "../kernel/entry.h"
@@ -245,9 +245,12 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
245 do_no_context(regs, int_code, trans_exc_code); 245 do_no_context(regs, int_code, trans_exc_code);
246 break; 246 break;
247 default: /* fault & VM_FAULT_ERROR */ 247 default: /* fault & VM_FAULT_ERROR */
248 if (fault & VM_FAULT_OOM) 248 if (fault & VM_FAULT_OOM) {
249 pagefault_out_of_memory(); 249 if (!(regs->psw.mask & PSW_MASK_PSTATE))
250 else if (fault & VM_FAULT_SIGBUS) { 250 do_no_context(regs, int_code, trans_exc_code);
251 else
252 pagefault_out_of_memory();
253 } else if (fault & VM_FAULT_SIGBUS) {
251 /* Kernel mode? Handle exceptions or die */ 254 /* Kernel mode? Handle exceptions or die */
252 if (!(regs->psw.mask & PSW_MASK_PSTATE)) 255 if (!(regs->psw.mask & PSW_MASK_PSTATE))
253 do_no_context(regs, int_code, trans_exc_code); 256 do_no_context(regs, int_code, trans_exc_code);
@@ -277,7 +280,8 @@ static inline int do_exception(struct pt_regs *regs, int access,
277 struct mm_struct *mm; 280 struct mm_struct *mm;
278 struct vm_area_struct *vma; 281 struct vm_area_struct *vma;
279 unsigned long address; 282 unsigned long address;
280 int fault, write; 283 unsigned int flags;
284 int fault;
281 285
282 if (notify_page_fault(regs)) 286 if (notify_page_fault(regs))
283 return 0; 287 return 0;
@@ -296,6 +300,10 @@ static inline int do_exception(struct pt_regs *regs, int access,
296 300
297 address = trans_exc_code & __FAIL_ADDR_MASK; 301 address = trans_exc_code & __FAIL_ADDR_MASK;
298 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 302 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
303 flags = FAULT_FLAG_ALLOW_RETRY;
304 if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
305 flags |= FAULT_FLAG_WRITE;
306retry:
299 down_read(&mm->mmap_sem); 307 down_read(&mm->mmap_sem);
300 308
301 fault = VM_FAULT_BADMAP; 309 fault = VM_FAULT_BADMAP;
@@ -325,21 +333,31 @@ static inline int do_exception(struct pt_regs *regs, int access,
325 * make sure we exit gracefully rather than endlessly redo 333 * make sure we exit gracefully rather than endlessly redo
326 * the fault. 334 * the fault.
327 */ 335 */
328 write = (access == VM_WRITE || 336 fault = handle_mm_fault(mm, vma, address, flags);
329 (trans_exc_code & store_indication) == 0x400) ?
330 FAULT_FLAG_WRITE : 0;
331 fault = handle_mm_fault(mm, vma, address, write);
332 if (unlikely(fault & VM_FAULT_ERROR)) 337 if (unlikely(fault & VM_FAULT_ERROR))
333 goto out_up; 338 goto out_up;
334 339
335 if (fault & VM_FAULT_MAJOR) { 340 /*
336 tsk->maj_flt++; 341 * Major/minor page fault accounting is only done on the
337 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 342 * initial attempt. If we go through a retry, it is extremely
338 regs, address); 343 * likely that the page will be found in page cache at that point.
339 } else { 344 */
340 tsk->min_flt++; 345 if (flags & FAULT_FLAG_ALLOW_RETRY) {
341 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 346 if (fault & VM_FAULT_MAJOR) {
342 regs, address); 347 tsk->maj_flt++;
348 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
349 regs, address);
350 } else {
351 tsk->min_flt++;
352 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
353 regs, address);
354 }
355 if (fault & VM_FAULT_RETRY) {
356 /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
357 * of starvation. */
358 flags &= ~FAULT_FLAG_ALLOW_RETRY;
359 goto retry;
360 }
343 } 361 }
344 /* 362 /*
345 * The instruction that caused the program check will 363 * The instruction that caused the program check will
@@ -429,10 +447,9 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
429 access = write ? VM_WRITE : VM_READ; 447 access = write ? VM_WRITE : VM_READ;
430 fault = do_exception(&regs, access, uaddr | 2); 448 fault = do_exception(&regs, access, uaddr | 2);
431 if (unlikely(fault)) { 449 if (unlikely(fault)) {
432 if (fault & VM_FAULT_OOM) { 450 if (fault & VM_FAULT_OOM)
433 pagefault_out_of_memory(); 451 return -EFAULT;
434 fault = 0; 452 else if (fault & VM_FAULT_SIGBUS)
435 } else if (fault & VM_FAULT_SIGBUS)
436 do_sigbus(&regs, pgm_int_code, uaddr); 453 do_sigbus(&regs, pgm_int_code, uaddr);
437 } 454 }
438 return fault ? -EFAULT : 0; 455 return fault ? -EFAULT : 0;
@@ -485,7 +502,6 @@ int pfault_init(void)
485 "2:\n" 502 "2:\n"
486 EX_TABLE(0b,1b) 503 EX_TABLE(0b,1b)
487 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); 504 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
488 __ctl_set_bit(0, 9);
489 return rc; 505 return rc;
490} 506}
491 507
@@ -500,7 +516,6 @@ void pfault_fini(void)
500 516
501 if (!MACHINE_IS_VM || pfault_disable) 517 if (!MACHINE_IS_VM || pfault_disable)
502 return; 518 return;
503 __ctl_clear_bit(0,9);
504 asm volatile( 519 asm volatile(
505 " diag %0,0,0x258\n" 520 " diag %0,0,0x258\n"
506 "0:\n" 521 "0:\n"
@@ -615,6 +630,7 @@ static int __init pfault_irq_init(void)
615 rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; 630 rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
616 if (rc) 631 if (rc)
617 goto out_pfault; 632 goto out_pfault;
633 service_subclass_irq_register();
618 hotcpu_notifier(pfault_cpu_notify, 0); 634 hotcpu_notifier(pfault_cpu_notify, 0);
619 return 0; 635 return 0;
620 636
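Editor's aside on the fault.c hunk above: do_exception() now starts with FAULT_FLAG_ALLOW_RETRY set, does the per-task major/minor accounting only on that first attempt, and when handle_mm_fault() reports VM_FAULT_RETRY it clears the flag before jumping back to the retry label, so a fault is retried at most once. The toy model below mirrors only that control shape; try_fault() and the RETRY flag stand in for handle_mm_fault() and VM_FAULT_RETRY and are not kernel names.

/* Standalone sketch (toy model, not kernel code): the "retry at most once"
 * pattern the hunk above gives do_exception(). */
#include <stdio.h>

#define ALLOW_RETRY 0x1
#define RETRY       0x2

static int attempts;

static int try_fault(int flags)
{
	attempts++;
	/* Pretend the first attempt had to drop the lock and asks to retry. */
	return (flags & ALLOW_RETRY) ? RETRY : 0;
}

int main(void)
{
	int flags = ALLOW_RETRY;
	int fault;

retry:
	fault = try_fault(flags);
	if ((flags & ALLOW_RETRY) && (fault & RETRY)) {
		flags &= ~ALLOW_RETRY;	/* never retry twice: avoids starvation */
		goto retry;
	}
	printf("handled after %d attempt(s)\n", attempts);
	return 0;
}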
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index dfefc2171691..59b663109d90 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -119,9 +119,7 @@ void __init paging_init(void)
119 sparse_memory_present_with_active_regions(MAX_NUMNODES); 119 sparse_memory_present_with_active_regions(MAX_NUMNODES);
120 sparse_init(); 120 sparse_init();
121 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 121 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
122#ifdef CONFIG_ZONE_DMA
123 max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); 122 max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
124#endif
125 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 123 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
126 free_area_init_nodes(max_zone_pfns); 124 free_area_init_nodes(max_zone_pfns);
127 fault_init(); 125 fault_init();
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 053caa0fd276..4552ce40c81a 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -19,7 +19,7 @@
19#include <linux/oprofile.h> 19#include <linux/oprofile.h>
20 20
21#include <asm/lowcore.h> 21#include <asm/lowcore.h>
22#include <asm/s390_ext.h> 22#include <asm/irq.h>
23 23
24#include "hwsampler.h" 24#include "hwsampler.h"
25 25
@@ -580,7 +580,7 @@ static int hws_cpu_callback(struct notifier_block *nfb,
580{ 580{
581 /* We do not have sampler space available for all possible CPUs. 581 /* We do not have sampler space available for all possible CPUs.
582 All CPUs should be online when hw sampling is activated. */ 582 All CPUs should be online when hw sampling is activated. */
583 return NOTIFY_BAD; 583 return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD;
584} 584}
585 585
586static struct notifier_block hws_cpu_notifier = { 586static struct notifier_block hws_cpu_notifier = {
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index e73bc781cc14..288add8d168f 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -43,9 +43,6 @@ config NO_DMA
43config RWSEM_GENERIC_SPINLOCK 43config RWSEM_GENERIC_SPINLOCK
44 def_bool y 44 def_bool y
45 45
46config GENERIC_FIND_NEXT_BIT
47 def_bool y
48
49config GENERIC_HWEIGHT 46config GENERIC_HWEIGHT
50 def_bool y 47 def_bool y
51 48
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index b44e37753b9a..74495a5ea027 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -71,12 +71,6 @@ config GENERIC_CSUM
71 def_bool y 71 def_bool y
72 depends on SUPERH64 72 depends on SUPERH64
73 73
74config GENERIC_FIND_NEXT_BIT
75 def_bool y
76
77config GENERIC_FIND_BIT_LE
78 def_bool y
79
80config GENERIC_HWEIGHT 74config GENERIC_HWEIGHT
81 def_bool y 75 def_bool y
82 76
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index 77ec0e7b8ddf..e7583484cc07 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -7,7 +7,6 @@ CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y 7CONFIG_IKCONFIG_PROC=y
8CONFIG_LOG_BUF_SHIFT=14 8CONFIG_LOG_BUF_SHIFT=14
9CONFIG_CGROUPS=y 9CONFIG_CGROUPS=y
10CONFIG_CGROUP_NS=y
11CONFIG_CGROUP_FREEZER=y 10CONFIG_CGROUP_FREEZER=y
12CONFIG_CGROUP_DEVICE=y 11CONFIG_CGROUP_DEVICE=y
13CONFIG_CGROUP_CPUACCT=y 12CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index c41650572d79..8a7dd7b59c5c 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -12,7 +12,6 @@ CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y 12CONFIG_IKCONFIG_PROC=y
13CONFIG_CGROUPS=y 13CONFIG_CGROUPS=y
14CONFIG_CGROUP_DEBUG=y 14CONFIG_CGROUP_DEBUG=y
15CONFIG_CGROUP_NS=y
16CONFIG_CGROUP_FREEZER=y 15CONFIG_CGROUP_FREEZER=y
17CONFIG_CGROUP_DEVICE=y 16CONFIG_CGROUP_DEVICE=y
18CONFIG_CPUSETS=y 17CONFIG_CPUSETS=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index a468ff227fc6..72c3fad7383f 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -8,7 +8,6 @@ CONFIG_RCU_TRACE=y
8CONFIG_LOG_BUF_SHIFT=14 8CONFIG_LOG_BUF_SHIFT=14
9CONFIG_CGROUPS=y 9CONFIG_CGROUPS=y
10CONFIG_CGROUP_DEBUG=y 10CONFIG_CGROUP_DEBUG=y
11CONFIG_CGROUP_NS=y
12CONFIG_CGROUP_DEVICE=y 11CONFIG_CGROUP_DEVICE=y
13CONFIG_CGROUP_CPUACCT=y 12CONFIG_CGROUP_CPUACCT=y
14CONFIG_RESOURCE_COUNTERS=y 13CONFIG_RESOURCE_COUNTERS=y
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index 3f92d37c6374..6bb413036892 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG=y
9CONFIG_IKCONFIG_PROC=y 9CONFIG_IKCONFIG_PROC=y
10CONFIG_LOG_BUF_SHIFT=14 10CONFIG_LOG_BUF_SHIFT=14
11CONFIG_CGROUPS=y 11CONFIG_CGROUPS=y
12CONFIG_CGROUP_NS=y
13CONFIG_CGROUP_FREEZER=y 12CONFIG_CGROUP_FREEZER=y
14CONFIG_CGROUP_DEVICE=y 13CONFIG_CGROUP_DEVICE=y
15CONFIG_CGROUP_CPUACCT=y 14CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig
index 7b3daec6fefe..8bfa4d056d7a 100644
--- a/arch/sh/configs/urquell_defconfig
+++ b/arch/sh/configs/urquell_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
9CONFIG_LOG_BUF_SHIFT=14 9CONFIG_LOG_BUF_SHIFT=14
10CONFIG_CGROUPS=y 10CONFIG_CGROUPS=y
11CONFIG_CGROUP_DEBUG=y 11CONFIG_CGROUP_DEBUG=y
12CONFIG_CGROUP_NS=y
13CONFIG_CGROUP_FREEZER=y 12CONFIG_CGROUP_FREEZER=y
14CONFIG_CGROUP_DEVICE=y 13CONFIG_CGROUP_DEVICE=y
15CONFIG_CPUSETS=y 14CONFIG_CPUSETS=y
diff --git a/arch/sh/include/asm/kgdb.h b/arch/sh/include/asm/kgdb.h
index 4235e228d921..f3613952d1ae 100644
--- a/arch/sh/include/asm/kgdb.h
+++ b/arch/sh/include/asm/kgdb.h
@@ -34,5 +34,6 @@ static inline void arch_kgdb_breakpoint(void)
34 34
35#define CACHE_FLUSH_IS_SAFE 1 35#define CACHE_FLUSH_IS_SAFE 1
36#define BREAK_INSTR_SIZE 2 36#define BREAK_INSTR_SIZE 2
37#define GDB_ADJUSTS_BREAK_OFFSET
37 38
38#endif /* __ASM_SH_KGDB_H */ 39#endif /* __ASM_SH_KGDB_H */
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index de167d3a1a80..40725b4a8018 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -40,9 +40,8 @@
40#include <asm/system.h> 40#include <asm/system.h>
41 41
42#define user_mode(regs) (((regs)->sr & 0x40000000)==0) 42#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
43#define user_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
44#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15]) 43#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
45#define instruction_pointer(regs) ((unsigned long)(regs)->pc) 44#define GET_USP(regs) ((regs)->regs[15])
46 45
47extern void show_regs(struct pt_regs *); 46extern void show_regs(struct pt_regs *);
48 47
@@ -139,6 +138,9 @@ static inline unsigned long profile_pc(struct pt_regs *regs)
139 138
140 return pc; 139 return pc;
141} 140}
141#define profile_pc profile_pc
142
143#include <asm-generic/ptrace.h>
142#endif /* __KERNEL__ */ 144#endif /* __KERNEL__ */
143 145
144#endif /* __ASM_SH_PTRACE_H */ 146#endif /* __ASM_SH_PTRACE_H */
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h
index 64eb41a063e8..e14567a7e9a1 100644
--- a/arch/sh/include/asm/suspend.h
+++ b/arch/sh/include/asm/suspend.h
@@ -3,7 +3,6 @@
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/notifier.h> 5#include <linux/notifier.h>
6static inline int arch_prepare_suspend(void) { return 0; }
7 6
8#include <asm/ptrace.h> 7#include <asm/ptrace.h>
9 8
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index bb7d2702c2c9..3432008d2888 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -374,8 +374,9 @@
374#define __NR_clock_adjtime 361 374#define __NR_clock_adjtime 361
375#define __NR_syncfs 362 375#define __NR_syncfs 362
376#define __NR_sendmmsg 363 376#define __NR_sendmmsg 363
377#define __NR_setns 364
377 378
378#define NR_syscalls 364 379#define NR_syscalls 365
379 380
380#ifdef __KERNEL__ 381#ifdef __KERNEL__
381 382
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 46327cea1e5c..ec9898665f23 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -395,10 +395,11 @@
395#define __NR_clock_adjtime 372 395#define __NR_clock_adjtime 372
396#define __NR_syncfs 373 396#define __NR_syncfs 373
397#define __NR_sendmmsg 374 397#define __NR_sendmmsg 374
398#define __NR_setns 375
398 399
399#ifdef __KERNEL__ 400#ifdef __KERNEL__
400 401
401#define NR_syscalls 375 402#define NR_syscalls 376
402 403
403#define __ARCH_WANT_IPC_PARSE_VERSION 404#define __ARCH_WANT_IPC_PARSE_VERSION
404#define __ARCH_WANT_OLD_READDIR 405#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 7c486f3e3a3c..39b051de4c7c 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -381,3 +381,4 @@ ENTRY(sys_call_table)
381 .long sys_clock_adjtime 381 .long sys_clock_adjtime
382 .long sys_syncfs 382 .long sys_syncfs
383 .long sys_sendmmsg 383 .long sys_sendmmsg
384 .long sys_setns
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index ba1a737afe80..089c4d825d08 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -401,3 +401,4 @@ sys_call_table:
401 .long sys_clock_adjtime 401 .long sys_clock_adjtime
402 .long sys_syncfs 402 .long sys_syncfs
403 .long sys_sendmmsg 403 .long sys_sendmmsg
404 .long sys_setns /* 375 */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 63a027c9ada5..af32e17fa170 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -190,14 +190,6 @@ config RWSEM_XCHGADD_ALGORITHM
190 bool 190 bool
191 default y if SPARC64 191 default y if SPARC64
192 192
193config GENERIC_FIND_NEXT_BIT
194 bool
195 default y
196
197config GENERIC_FIND_BIT_LE
198 bool
199 default y
200
201config GENERIC_HWEIGHT 193config GENERIC_HWEIGHT
202 bool 194 bool
203 default y if !ULTRA_HAS_POPULATION_COUNT 195 default y if !ULTRA_HAS_POPULATION_COUNT
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index c5387ed0add8..6260d5deeabc 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -405,8 +405,9 @@
405#define __NR_clock_adjtime 334 405#define __NR_clock_adjtime 334
406#define __NR_syncfs 335 406#define __NR_syncfs 335
407#define __NR_sendmmsg 336 407#define __NR_sendmmsg 336
408#define __NR_setns 337
408 409
409#define NR_syscalls 337 410#define NR_syscalls 338
410 411
411#ifdef __32bit_syscall_numbers__ 412#ifdef __32bit_syscall_numbers__
412/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, 413/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 332c83ff7701..6e492d59f6b1 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -84,4 +84,4 @@ sys_call_table:
84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
87/*335*/ .long sys_syncfs, sys_sendmmsg 87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 43887ca0be0e..f566518483b5 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -85,7 +85,7 @@ sys_call_table32:
85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv 85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init 86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
87/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 87/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
88 .word sys_syncfs, compat_sys_sendmmsg 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns
89 89
90#endif /* CONFIG_COMPAT */ 90#endif /* CONFIG_COMPAT */
91 91
@@ -162,4 +162,4 @@ sys_call_table:
162/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 162/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
164/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 164/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
165 .word sys_syncfs, sys_sendmmsg 165 .word sys_syncfs, sys_sendmmsg, sys_setns
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 635e1bfb1c5d..e1e50101b3bb 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -5,7 +5,6 @@ config TILE
5 def_bool y 5 def_bool y
6 select HAVE_KVM if !TILEGX 6 select HAVE_KVM if !TILEGX
7 select GENERIC_FIND_FIRST_BIT 7 select GENERIC_FIND_FIRST_BIT
8 select GENERIC_FIND_NEXT_BIT
9 select USE_GENERIC_SMP_HELPERS 8 select USE_GENERIC_SMP_HELPERS
10 select CC_OPTIMIZE_FOR_SIZE 9 select CC_OPTIMIZE_FOR_SIZE
11 select HAVE_GENERIC_HARDIRQS 10 select HAVE_GENERIC_HARDIRQS
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86
index 795ea8e869f4..8aae429a56e2 100644
--- a/arch/um/Kconfig.x86
+++ b/arch/um/Kconfig.x86
@@ -15,7 +15,6 @@ endmenu
15config UML_X86 15config UML_X86
16 def_bool y 16 def_bool y
17 select GENERIC_FIND_FIRST_BIT 17 select GENERIC_FIND_FIRST_BIT
18 select GENERIC_FIND_NEXT_BIT
19 18
20config 64BIT 19config 64BIT
21 bool 20 bool
diff --git a/arch/unicore32/include/asm/suspend.h b/arch/unicore32/include/asm/suspend.h
index 88a9c0f32b21..65bad75c7e96 100644
--- a/arch/unicore32/include/asm/suspend.h
+++ b/arch/unicore32/include/asm/suspend.h
@@ -14,7 +14,6 @@
14#define __UNICORE_SUSPEND_H__ 14#define __UNICORE_SUSPEND_H__
15 15
16#ifndef __ASSEMBLY__ 16#ifndef __ASSEMBLY__
17static inline int arch_prepare_suspend(void) { return 0; }
18 17
19#include <asm/ptrace.h> 18#include <asm/ptrace.h>
20 19
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 483775f42d2a..da349723d411 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -64,7 +64,6 @@ config X86
64 select HAVE_GENERIC_HARDIRQS 64 select HAVE_GENERIC_HARDIRQS
65 select HAVE_SPARSE_IRQ 65 select HAVE_SPARSE_IRQ
66 select GENERIC_FIND_FIRST_BIT 66 select GENERIC_FIND_FIRST_BIT
67 select GENERIC_FIND_NEXT_BIT
68 select GENERIC_IRQ_PROBE 67 select GENERIC_IRQ_PROBE
69 select GENERIC_PENDING_IRQ if SMP 68 select GENERIC_PENDING_IRQ if SMP
70 select GENERIC_IRQ_SHOW 69 select GENERIC_IRQ_SHOW
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 6f9872658dd2..2bf18059fbea 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -10,7 +10,6 @@ CONFIG_TASK_IO_ACCOUNTING=y
10CONFIG_AUDIT=y 10CONFIG_AUDIT=y
11CONFIG_LOG_BUF_SHIFT=18 11CONFIG_LOG_BUF_SHIFT=18
12CONFIG_CGROUPS=y 12CONFIG_CGROUPS=y
13CONFIG_CGROUP_NS=y
14CONFIG_CGROUP_FREEZER=y 13CONFIG_CGROUP_FREEZER=y
15CONFIG_CPUSETS=y 14CONFIG_CPUSETS=y
16CONFIG_CGROUP_CPUACCT=y 15CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index ee01a9d5d4f0..22a0dc8e51dd 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -11,7 +11,6 @@ CONFIG_TASK_IO_ACCOUNTING=y
11CONFIG_AUDIT=y 11CONFIG_AUDIT=y
12CONFIG_LOG_BUF_SHIFT=18 12CONFIG_LOG_BUF_SHIFT=18
13CONFIG_CGROUPS=y 13CONFIG_CGROUPS=y
14CONFIG_CGROUP_NS=y
15CONFIG_CGROUP_FREEZER=y 14CONFIG_CGROUP_FREEZER=y
16CONFIG_CPUSETS=y 15CONFIG_CPUSETS=y
17CONFIG_CGROUP_CPUACCT=y 16CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 95f5826be458..c1870dddd322 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -849,4 +849,5 @@ ia32_sys_call_table:
849 .quad compat_sys_clock_adjtime 849 .quad compat_sys_clock_adjtime
850 .quad sys_syncfs 850 .quad sys_syncfs
851 .quad compat_sys_sendmmsg /* 345 */ 851 .quad compat_sys_sendmmsg /* 345 */
852 .quad sys_setns
852ia32_syscall_end: 853ia32_syscall_end:
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index 396f5b5fc4d7..77e95f54570a 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -77,6 +77,7 @@ static inline void arch_kgdb_breakpoint(void)
77} 77}
78#define BREAK_INSTR_SIZE 1 78#define BREAK_INSTR_SIZE 1
79#define CACHE_FLUSH_IS_SAFE 1 79#define CACHE_FLUSH_IS_SAFE 1
80#define GDB_ADJUSTS_BREAK_OFFSET
80 81
81extern int kgdb_ll_trap(int cmd, const char *str, 82extern int kgdb_ll_trap(int cmd, const char *str,
82 struct pt_regs *regs, long err, int trap, int sig); 83 struct pt_regs *regs, long err, int trap, int sig);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 1babf8adecdf..94e7618fcac8 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -136,6 +136,7 @@ struct cpuinfo_x86;
136struct task_struct; 136struct task_struct;
137 137
138extern unsigned long profile_pc(struct pt_regs *regs); 138extern unsigned long profile_pc(struct pt_regs *regs);
139#define profile_pc profile_pc
139 140
140extern unsigned long 141extern unsigned long
141convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); 142convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
@@ -202,20 +203,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
202#endif 203#endif
203} 204}
204 205
205static inline unsigned long instruction_pointer(struct pt_regs *regs) 206#define GET_IP(regs) ((regs)->ip)
206{ 207#define GET_FP(regs) ((regs)->bp)
207 return regs->ip; 208#define GET_USP(regs) ((regs)->sp)
208}
209
210static inline unsigned long frame_pointer(struct pt_regs *regs)
211{
212 return regs->bp;
213}
214 209
215static inline unsigned long user_stack_pointer(struct pt_regs *regs) 210#include <asm-generic/ptrace.h>
216{
217 return regs->sp;
218}
219 211
220/* Query offset/name of register from its name/offset */ 212/* Query offset/name of register from its name/offset */
221extern int regs_query_register_offset(const char *name); 213extern int regs_query_register_offset(const char *name);
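Editor's aside on the sh and x86 ptrace.h hunks above: both drop their hand-written accessors in favour of GET_IP/GET_FP/GET_USP macros (sh only defines GET_USP) and then include <asm-generic/ptrace.h>, which is expected to synthesize instruction_pointer(), frame_pointer() and user_stack_pointer() from those macros. A minimal sketch of that pattern; the struct layout is invented for the example and the inline helpers stand in for the generic header, they are not its actual contents.

/* Standalone sketch of the GET_IP/GET_FP/GET_USP pattern: the "arch side"
 * provides only raw accessor macros, the "generic side" builds the helpers
 * on top of them. */
#include <stdio.h>

struct pt_regs { unsigned long ip, bp, sp; };	/* illustrative layout */

/* arch side: only the raw accessor macros */
#define GET_IP(regs)  ((regs)->ip)
#define GET_FP(regs)  ((regs)->bp)
#define GET_USP(regs) ((regs)->sp)

/* generic side: helpers built on top of the macros */
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return GET_IP(regs);
}
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return GET_FP(regs);
}
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return GET_USP(regs);
}

int main(void)
{
	struct pt_regs regs = { 0x1000, 0x2000, 0x3000 };

	printf("ip=%#lx fp=%#lx usp=%#lx\n", instruction_pointer(&regs),
	       frame_pointer(&regs), user_stack_pointer(&regs));
	return 0;
}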
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index fd921c3a6841..487055c8c1aa 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -9,8 +9,6 @@
9#include <asm/desc.h> 9#include <asm/desc.h>
10#include <asm/i387.h> 10#include <asm/i387.h>
11 11
12static inline int arch_prepare_suspend(void) { return 0; }
13
14/* image of the saved processor state */ 12/* image of the saved processor state */
15struct saved_context { 13struct saved_context {
16 u16 es, fs, gs, ss; 14 u16 es, fs, gs, ss;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 8d942afae681..09b0bf104156 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -9,11 +9,6 @@
9#include <asm/desc.h> 9#include <asm/desc.h>
10#include <asm/i387.h> 10#include <asm/i387.h>
11 11
12static inline int arch_prepare_suspend(void)
13{
14 return 0;
15}
16
17/* 12/*
18 * Image of the saved processor state, used by the low level ACPI suspend to 13 * Image of the saved processor state, used by the low level ACPI suspend to
19 * RAM code and by the low level hibernation code. 14 * RAM code and by the low level hibernation code.
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index fb6a625c99bf..593485b38ab3 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -351,10 +351,11 @@
351#define __NR_clock_adjtime 343 351#define __NR_clock_adjtime 343
352#define __NR_syncfs 344 352#define __NR_syncfs 344
353#define __NR_sendmmsg 345 353#define __NR_sendmmsg 345
354#define __NR_setns 346
354 355
355#ifdef __KERNEL__ 356#ifdef __KERNEL__
356 357
357#define NR_syscalls 346 358#define NR_syscalls 347
358 359
359#define __ARCH_WANT_IPC_PARSE_VERSION 360#define __ARCH_WANT_IPC_PARSE_VERSION
360#define __ARCH_WANT_OLD_READDIR 361#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 79f90eb15aad..705bf139288c 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -679,6 +679,8 @@ __SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
679__SYSCALL(__NR_syncfs, sys_syncfs) 679__SYSCALL(__NR_syncfs, sys_syncfs)
680#define __NR_sendmmsg 307 680#define __NR_sendmmsg 307
681__SYSCALL(__NR_sendmmsg, sys_sendmmsg) 681__SYSCALL(__NR_sendmmsg, sys_sendmmsg)
682#define __NR_setns 308
683__SYSCALL(__NR_setns, sys_setns)
682 684
683#ifndef __NO_STUBS 685#ifndef __NO_STUBS
684#define __ARCH_WANT_OLD_READDIR 686#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 32cbffb0c494..fbb0a045a1a2 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -345,3 +345,4 @@ ENTRY(sys_call_table)
345 .long sys_clock_adjtime 345 .long sys_clock_adjtime
346 .long sys_syncfs 346 .long sys_syncfs
347 .long sys_sendmmsg /* 345 */ 347 .long sys_sendmmsg /* 345 */
348 .long sys_setns
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 02d752460371..dc708dcc62f1 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -75,67 +75,12 @@
75#include "mmu.h" 75#include "mmu.h"
76#include "debugfs.h" 76#include "debugfs.h"
77 77
78#define MMU_UPDATE_HISTO 30
79
80/* 78/*
81 * Protects atomic reservation decrease/increase against concurrent increases. 79 * Protects atomic reservation decrease/increase against concurrent increases.
82 * Also protects non-atomic updates of current_pages and balloon lists. 80 * Also protects non-atomic updates of current_pages and balloon lists.
83 */ 81 */
84DEFINE_SPINLOCK(xen_reservation_lock); 82DEFINE_SPINLOCK(xen_reservation_lock);
85 83
86#ifdef CONFIG_XEN_DEBUG_FS
87
88static struct {
89 u32 pgd_update;
90 u32 pgd_update_pinned;
91 u32 pgd_update_batched;
92
93 u32 pud_update;
94 u32 pud_update_pinned;
95 u32 pud_update_batched;
96
97 u32 pmd_update;
98 u32 pmd_update_pinned;
99 u32 pmd_update_batched;
100
101 u32 pte_update;
102 u32 pte_update_pinned;
103 u32 pte_update_batched;
104
105 u32 mmu_update;
106 u32 mmu_update_extended;
107 u32 mmu_update_histo[MMU_UPDATE_HISTO];
108
109 u32 prot_commit;
110 u32 prot_commit_batched;
111
112 u32 set_pte_at;
113 u32 set_pte_at_batched;
114 u32 set_pte_at_pinned;
115 u32 set_pte_at_current;
116 u32 set_pte_at_kernel;
117} mmu_stats;
118
119static u8 zero_stats;
120
121static inline void check_zero(void)
122{
123 if (unlikely(zero_stats)) {
124 memset(&mmu_stats, 0, sizeof(mmu_stats));
125 zero_stats = 0;
126 }
127}
128
129#define ADD_STATS(elem, val) \
130 do { check_zero(); mmu_stats.elem += (val); } while(0)
131
132#else /* !CONFIG_XEN_DEBUG_FS */
133
134#define ADD_STATS(elem, val) do { (void)(val); } while(0)
135
136#endif /* CONFIG_XEN_DEBUG_FS */
137
138
139/* 84/*
140 * Identity map, in addition to plain kernel map. This needs to be 85 * Identity map, in addition to plain kernel map. This needs to be
141 * large enough to allocate page table pages to allocate the rest. 86 * large enough to allocate page table pages to allocate the rest.
@@ -243,11 +188,6 @@ static bool xen_page_pinned(void *ptr)
243 return PagePinned(page); 188 return PagePinned(page);
244} 189}
245 190
246static bool xen_iomap_pte(pte_t pte)
247{
248 return pte_flags(pte) & _PAGE_IOMAP;
249}
250
251void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) 191void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
252{ 192{
253 struct multicall_space mcs; 193 struct multicall_space mcs;
@@ -257,7 +197,7 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
257 u = mcs.args; 197 u = mcs.args;
258 198
259 /* ptep might be kmapped when using 32-bit HIGHPTE */ 199 /* ptep might be kmapped when using 32-bit HIGHPTE */
260 u->ptr = arbitrary_virt_to_machine(ptep).maddr; 200 u->ptr = virt_to_machine(ptep).maddr;
261 u->val = pte_val_ma(pteval); 201 u->val = pte_val_ma(pteval);
262 202
263 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); 203 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
@@ -266,11 +206,6 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
266} 206}
267EXPORT_SYMBOL_GPL(xen_set_domain_pte); 207EXPORT_SYMBOL_GPL(xen_set_domain_pte);
268 208
269static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
270{
271 xen_set_domain_pte(ptep, pteval, DOMID_IO);
272}
273
274static void xen_extend_mmu_update(const struct mmu_update *update) 209static void xen_extend_mmu_update(const struct mmu_update *update)
275{ 210{
276 struct multicall_space mcs; 211 struct multicall_space mcs;
@@ -279,27 +214,17 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
279 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u)); 214 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
280 215
281 if (mcs.mc != NULL) { 216 if (mcs.mc != NULL) {
282 ADD_STATS(mmu_update_extended, 1);
283 ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
284
285 mcs.mc->args[1]++; 217 mcs.mc->args[1]++;
286
287 if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
288 ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
289 else
290 ADD_STATS(mmu_update_histo[0], 1);
291 } else { 218 } else {
292 ADD_STATS(mmu_update, 1);
293 mcs = __xen_mc_entry(sizeof(*u)); 219 mcs = __xen_mc_entry(sizeof(*u));
294 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); 220 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
295 ADD_STATS(mmu_update_histo[1], 1);
296 } 221 }
297 222
298 u = mcs.args; 223 u = mcs.args;
299 *u = *update; 224 *u = *update;
300} 225}
301 226
302void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) 227static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
303{ 228{
304 struct mmu_update u; 229 struct mmu_update u;
305 230
@@ -312,17 +237,13 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
312 u.val = pmd_val_ma(val); 237 u.val = pmd_val_ma(val);
313 xen_extend_mmu_update(&u); 238 xen_extend_mmu_update(&u);
314 239
315 ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
316
317 xen_mc_issue(PARAVIRT_LAZY_MMU); 240 xen_mc_issue(PARAVIRT_LAZY_MMU);
318 241
319 preempt_enable(); 242 preempt_enable();
320} 243}
321 244
322void xen_set_pmd(pmd_t *ptr, pmd_t val) 245static void xen_set_pmd(pmd_t *ptr, pmd_t val)
323{ 246{
324 ADD_STATS(pmd_update, 1);
325
326 /* If page is not pinned, we can just update the entry 247 /* If page is not pinned, we can just update the entry
327 directly */ 248 directly */
328 if (!xen_page_pinned(ptr)) { 249 if (!xen_page_pinned(ptr)) {
@@ -330,8 +251,6 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)
330 return; 251 return;
331 } 252 }
332 253
333 ADD_STATS(pmd_update_pinned, 1);
334
335 xen_set_pmd_hyper(ptr, val); 254 xen_set_pmd_hyper(ptr, val);
336} 255}
337 256
@@ -344,35 +263,34 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
344 set_pte_vaddr(vaddr, mfn_pte(mfn, flags)); 263 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
345} 264}
346 265
347void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, 266static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
348 pte_t *ptep, pte_t pteval)
349{ 267{
350 if (xen_iomap_pte(pteval)) { 268 struct mmu_update u;
351 xen_set_iomap_pte(ptep, pteval);
352 goto out;
353 }
354 269
355 ADD_STATS(set_pte_at, 1); 270 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
356// ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep)); 271 return false;
357 ADD_STATS(set_pte_at_current, mm == current->mm);
358 ADD_STATS(set_pte_at_kernel, mm == &init_mm);
359 272
360 if (mm == current->mm || mm == &init_mm) { 273 xen_mc_batch();
361 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
362 struct multicall_space mcs;
363 mcs = xen_mc_entry(0);
364 274
365 MULTI_update_va_mapping(mcs.mc, addr, pteval, 0); 275 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
366 ADD_STATS(set_pte_at_batched, 1); 276 u.val = pte_val_ma(pteval);
367 xen_mc_issue(PARAVIRT_LAZY_MMU); 277 xen_extend_mmu_update(&u);
368 goto out; 278
369 } else 279 xen_mc_issue(PARAVIRT_LAZY_MMU);
370 if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
371 goto out;
372 }
373 xen_set_pte(ptep, pteval);
374 280
375out: return; 281 return true;
282}
283
284static void xen_set_pte(pte_t *ptep, pte_t pteval)
285{
286 if (!xen_batched_set_pte(ptep, pteval))
287 native_set_pte(ptep, pteval);
288}
289
290static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
291 pte_t *ptep, pte_t pteval)
292{
293 xen_set_pte(ptep, pteval);
376} 294}
377 295
378pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, 296pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
@@ -389,13 +307,10 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
389 307
390 xen_mc_batch(); 308 xen_mc_batch();
391 309
392 u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; 310 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
393 u.val = pte_val_ma(pte); 311 u.val = pte_val_ma(pte);
394 xen_extend_mmu_update(&u); 312 xen_extend_mmu_update(&u);
395 313
396 ADD_STATS(prot_commit, 1);
397 ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
398
399 xen_mc_issue(PARAVIRT_LAZY_MMU); 314 xen_mc_issue(PARAVIRT_LAZY_MMU);
400} 315}
401 316
@@ -463,7 +378,7 @@ static pteval_t iomap_pte(pteval_t val)
463 return val; 378 return val;
464} 379}
465 380
466pteval_t xen_pte_val(pte_t pte) 381static pteval_t xen_pte_val(pte_t pte)
467{ 382{
468 pteval_t pteval = pte.pte; 383 pteval_t pteval = pte.pte;
469 384
@@ -480,7 +395,7 @@ pteval_t xen_pte_val(pte_t pte)
480} 395}
481PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); 396PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
482 397
483pgdval_t xen_pgd_val(pgd_t pgd) 398static pgdval_t xen_pgd_val(pgd_t pgd)
484{ 399{
485 return pte_mfn_to_pfn(pgd.pgd); 400 return pte_mfn_to_pfn(pgd.pgd);
486} 401}
@@ -511,7 +426,7 @@ void xen_set_pat(u64 pat)
511 WARN_ON(pat != 0x0007010600070106ull); 426 WARN_ON(pat != 0x0007010600070106ull);
512} 427}
513 428
514pte_t xen_make_pte(pteval_t pte) 429static pte_t xen_make_pte(pteval_t pte)
515{ 430{
516 phys_addr_t addr = (pte & PTE_PFN_MASK); 431 phys_addr_t addr = (pte & PTE_PFN_MASK);
517 432
@@ -581,20 +496,20 @@ pte_t xen_make_pte_debug(pteval_t pte)
581PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); 496PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
582#endif 497#endif
583 498
584pgd_t xen_make_pgd(pgdval_t pgd) 499static pgd_t xen_make_pgd(pgdval_t pgd)
585{ 500{
586 pgd = pte_pfn_to_mfn(pgd); 501 pgd = pte_pfn_to_mfn(pgd);
587 return native_make_pgd(pgd); 502 return native_make_pgd(pgd);
588} 503}
589PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); 504PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
590 505
591pmdval_t xen_pmd_val(pmd_t pmd) 506static pmdval_t xen_pmd_val(pmd_t pmd)
592{ 507{
593 return pte_mfn_to_pfn(pmd.pmd); 508 return pte_mfn_to_pfn(pmd.pmd);
594} 509}
595PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); 510PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
596 511
597void xen_set_pud_hyper(pud_t *ptr, pud_t val) 512static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
598{ 513{
599 struct mmu_update u; 514 struct mmu_update u;
600 515
@@ -607,17 +522,13 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
607 u.val = pud_val_ma(val); 522 u.val = pud_val_ma(val);
608 xen_extend_mmu_update(&u); 523 xen_extend_mmu_update(&u);
609 524
610 ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
611
612 xen_mc_issue(PARAVIRT_LAZY_MMU); 525 xen_mc_issue(PARAVIRT_LAZY_MMU);
613 526
614 preempt_enable(); 527 preempt_enable();
615} 528}
616 529
617void xen_set_pud(pud_t *ptr, pud_t val) 530static void xen_set_pud(pud_t *ptr, pud_t val)
618{ 531{
619 ADD_STATS(pud_update, 1);
620
621 /* If page is not pinned, we can just update the entry 532 /* If page is not pinned, we can just update the entry
622 directly */ 533 directly */
623 if (!xen_page_pinned(ptr)) { 534 if (!xen_page_pinned(ptr)) {
@@ -625,56 +536,28 @@ void xen_set_pud(pud_t *ptr, pud_t val)
625 return; 536 return;
626 } 537 }
627 538
628 ADD_STATS(pud_update_pinned, 1);
629
630 xen_set_pud_hyper(ptr, val); 539 xen_set_pud_hyper(ptr, val);
631} 540}
632 541
633void xen_set_pte(pte_t *ptep, pte_t pte)
634{
635 if (xen_iomap_pte(pte)) {
636 xen_set_iomap_pte(ptep, pte);
637 return;
638 }
639
640 ADD_STATS(pte_update, 1);
641// ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
642 ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
643
644#ifdef CONFIG_X86_PAE 542#ifdef CONFIG_X86_PAE
645 ptep->pte_high = pte.pte_high; 543static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
646 smp_wmb();
647 ptep->pte_low = pte.pte_low;
648#else
649 *ptep = pte;
650#endif
651}
652
653#ifdef CONFIG_X86_PAE
654void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
655{ 544{
656 if (xen_iomap_pte(pte)) {
657 xen_set_iomap_pte(ptep, pte);
658 return;
659 }
660
661 set_64bit((u64 *)ptep, native_pte_val(pte)); 545 set_64bit((u64 *)ptep, native_pte_val(pte));
662} 546}
663 547
664void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 548static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
665{ 549{
666 ptep->pte_low = 0; 550 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
667 smp_wmb(); /* make sure low gets written first */ 551 native_pte_clear(mm, addr, ptep);
668 ptep->pte_high = 0;
669} 552}
670 553
671void xen_pmd_clear(pmd_t *pmdp) 554static void xen_pmd_clear(pmd_t *pmdp)
672{ 555{
673 set_pmd(pmdp, __pmd(0)); 556 set_pmd(pmdp, __pmd(0));
674} 557}
675#endif /* CONFIG_X86_PAE */ 558#endif /* CONFIG_X86_PAE */
676 559
677pmd_t xen_make_pmd(pmdval_t pmd) 560static pmd_t xen_make_pmd(pmdval_t pmd)
678{ 561{
679 pmd = pte_pfn_to_mfn(pmd); 562 pmd = pte_pfn_to_mfn(pmd);
680 return native_make_pmd(pmd); 563 return native_make_pmd(pmd);
@@ -682,13 +565,13 @@ pmd_t xen_make_pmd(pmdval_t pmd)
682PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); 565PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
683 566
684#if PAGETABLE_LEVELS == 4 567#if PAGETABLE_LEVELS == 4
685pudval_t xen_pud_val(pud_t pud) 568static pudval_t xen_pud_val(pud_t pud)
686{ 569{
687 return pte_mfn_to_pfn(pud.pud); 570 return pte_mfn_to_pfn(pud.pud);
688} 571}
689PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); 572PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
690 573
691pud_t xen_make_pud(pudval_t pud) 574static pud_t xen_make_pud(pudval_t pud)
692{ 575{
693 pud = pte_pfn_to_mfn(pud); 576 pud = pte_pfn_to_mfn(pud);
694 577
@@ -696,7 +579,7 @@ pud_t xen_make_pud(pudval_t pud)
696} 579}
697PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); 580PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
698 581
699pgd_t *xen_get_user_pgd(pgd_t *pgd) 582static pgd_t *xen_get_user_pgd(pgd_t *pgd)
700{ 583{
701 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); 584 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
702 unsigned offset = pgd - pgd_page; 585 unsigned offset = pgd - pgd_page;
@@ -728,7 +611,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
728 * 2. It is always pinned 611 * 2. It is always pinned
729 * 3. It has no user pagetable attached to it 612 * 3. It has no user pagetable attached to it
730 */ 613 */
731void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) 614static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
732{ 615{
733 preempt_disable(); 616 preempt_disable();
734 617
@@ -741,12 +624,10 @@ void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
741 preempt_enable(); 624 preempt_enable();
742} 625}
743 626
744void xen_set_pgd(pgd_t *ptr, pgd_t val) 627static void xen_set_pgd(pgd_t *ptr, pgd_t val)
745{ 628{
746 pgd_t *user_ptr = xen_get_user_pgd(ptr); 629 pgd_t *user_ptr = xen_get_user_pgd(ptr);
747 630
748 ADD_STATS(pgd_update, 1);
749
750 /* If page is not pinned, we can just update the entry 631 /* If page is not pinned, we can just update the entry
751 directly */ 632 directly */
752 if (!xen_page_pinned(ptr)) { 633 if (!xen_page_pinned(ptr)) {
@@ -758,9 +639,6 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
758 return; 639 return;
759 } 640 }
760 641
761 ADD_STATS(pgd_update_pinned, 1);
762 ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
763
764 /* If it's pinned, then we can at least batch the kernel and 642 /* If it's pinned, then we can at least batch the kernel and
765 user updates together. */ 643 user updates together. */
766 xen_mc_batch(); 644 xen_mc_batch();
@@ -1162,14 +1040,14 @@ void xen_mm_unpin_all(void)
1162 spin_unlock(&pgd_lock); 1040 spin_unlock(&pgd_lock);
1163} 1041}
1164 1042
1165void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) 1043static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1166{ 1044{
1167 spin_lock(&next->page_table_lock); 1045 spin_lock(&next->page_table_lock);
1168 xen_pgd_pin(next); 1046 xen_pgd_pin(next);
1169 spin_unlock(&next->page_table_lock); 1047 spin_unlock(&next->page_table_lock);
1170} 1048}
1171 1049
1172void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) 1050static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1173{ 1051{
1174 spin_lock(&mm->page_table_lock); 1052 spin_lock(&mm->page_table_lock);
1175 xen_pgd_pin(mm); 1053 xen_pgd_pin(mm);
@@ -1256,7 +1134,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
1256 * pagetable because of lazy tlb flushing. This means we need need to 1134 * pagetable because of lazy tlb flushing. This means we need need to
1257 * switch all CPUs off this pagetable before we can unpin it. 1135 * switch all CPUs off this pagetable before we can unpin it.
1258 */ 1136 */
1259void xen_exit_mmap(struct mm_struct *mm) 1137static void xen_exit_mmap(struct mm_struct *mm)
1260{ 1138{
1261 get_cpu(); /* make sure we don't move around */ 1139 get_cpu(); /* make sure we don't move around */
1262 xen_drop_mm_ref(mm); 1140 xen_drop_mm_ref(mm);
@@ -2371,7 +2249,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2371 struct remap_data *rmd = data; 2249 struct remap_data *rmd = data;
2372 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot)); 2250 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2373 2251
2374 rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr; 2252 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2375 rmd->mmu_update->val = pte_val_ma(pte); 2253 rmd->mmu_update->val = pte_val_ma(pte);
2376 rmd->mmu_update++; 2254 rmd->mmu_update++;
2377 2255
@@ -2425,7 +2303,6 @@ out:
2425EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 2303EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
2426 2304
2427#ifdef CONFIG_XEN_DEBUG_FS 2305#ifdef CONFIG_XEN_DEBUG_FS
2428
2429static int p2m_dump_open(struct inode *inode, struct file *filp) 2306static int p2m_dump_open(struct inode *inode, struct file *filp)
2430{ 2307{
2431 return single_open(filp, p2m_dump_show, NULL); 2308 return single_open(filp, p2m_dump_show, NULL);
@@ -2437,65 +2314,4 @@ static const struct file_operations p2m_dump_fops = {
2437 .llseek = seq_lseek, 2314 .llseek = seq_lseek,
2438 .release = single_release, 2315 .release = single_release,
2439}; 2316};
2440 2317#endif /* CONFIG_XEN_DEBUG_FS */
2441static struct dentry *d_mmu_debug;
2442
2443static int __init xen_mmu_debugfs(void)
2444{
2445 struct dentry *d_xen = xen_init_debugfs();
2446
2447 if (d_xen == NULL)
2448 return -ENOMEM;
2449
2450 d_mmu_debug = debugfs_create_dir("mmu", d_xen);
2451
2452 debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
2453
2454 debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
2455 debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
2456 &mmu_stats.pgd_update_pinned);
2457 debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
2458 &mmu_stats.pgd_update_pinned);
2459
2460 debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
2461 debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
2462 &mmu_stats.pud_update_pinned);
2463 debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
2464 &mmu_stats.pud_update_pinned);
2465
2466 debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
2467 debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
2468 &mmu_stats.pmd_update_pinned);
2469 debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
2470 &mmu_stats.pmd_update_pinned);
2471
2472 debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
2473// debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
2474// &mmu_stats.pte_update_pinned);
2475 debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
2476 &mmu_stats.pte_update_pinned);
2477
2478 debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
2479 debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
2480 &mmu_stats.mmu_update_extended);
2481 xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
2482 mmu_stats.mmu_update_histo, 20);
2483
2484 debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
2485 debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
2486 &mmu_stats.set_pte_at_batched);
2487 debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
2488 &mmu_stats.set_pte_at_current);
2489 debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
2490 &mmu_stats.set_pte_at_kernel);
2491
2492 debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
2493 debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
2494 &mmu_stats.prot_commit_batched);
2495
2496 debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
2497 return 0;
2498}
2499fs_initcall(xen_mmu_debugfs);
2500
2501#endif /* CONFIG_XEN_DEBUG_FS */
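The mmu.c hunks above collapse the old open-coded paths in xen_set_pte_at() into one helper: while the CPU is in lazy-MMU mode, each PTE write is queued as an MMU_NORMAL_PT_UPDATE entry on the multicall list and flushed with the batch; otherwise the write falls back to native_set_pte(). A hedged, illustrative sketch of the caller side that actually triggers the batching (generic mm code normally does this bracketing for you; the function below is invented for illustration only):

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /* Illustrative only: a run of PTE writes inside a lazy-MMU section is what
     * lets xen_batched_set_pte() queue hypercalls instead of trapping per PTE. */
    static void example_fill_range(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t *ptes, unsigned int nr)
    {
            unsigned int i;

            arch_enter_lazy_mmu_mode();     /* paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU */
            for (i = 0; i < nr; i++)        /* each set_pte_at() is appended to the batch */
                    set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, ptes[i]);
            arch_leave_lazy_mmu_mode();     /* pending multicalls are issued here */
    }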
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 537bb9aab777..73809bb951b4 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -15,43 +15,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
15 15
16void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); 16void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
17 17
18
19void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
20void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
21void xen_exit_mmap(struct mm_struct *mm);
22
23pteval_t xen_pte_val(pte_t);
24pmdval_t xen_pmd_val(pmd_t);
25pgdval_t xen_pgd_val(pgd_t);
26
27pte_t xen_make_pte(pteval_t);
28pmd_t xen_make_pmd(pmdval_t);
29pgd_t xen_make_pgd(pgdval_t);
30
31void xen_set_pte(pte_t *ptep, pte_t pteval);
32void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
33 pte_t *ptep, pte_t pteval);
34
35#ifdef CONFIG_X86_PAE
36void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
37void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
38void xen_pmd_clear(pmd_t *pmdp);
39#endif /* CONFIG_X86_PAE */
40
41void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
42void xen_set_pud(pud_t *ptr, pud_t val);
43void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
44void xen_set_pud_hyper(pud_t *ptr, pud_t val);
45
46#if PAGETABLE_LEVELS == 4
47pudval_t xen_pud_val(pud_t pud);
48pud_t xen_make_pud(pudval_t pudval);
49void xen_set_pgd(pgd_t *pgdp, pgd_t pgd);
50void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd);
51#endif
52
53pgd_t *xen_get_user_pgd(pgd_t *pgd);
54
55pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 18pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
56void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, 19void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
57 pte_t *ptep, pte_t pte); 20 pte_t *ptep, pte_t pte);
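With those helpers now static, mmu.h stops exporting them; the only remaining consumers are the paravirt op tables populated inside mmu.c itself. A hedged sketch of that kind of hookup (the real initializer lives in mmu.c and is not part of this hunk, so treat the field subset below as illustrative):

    /* Sketch: static MMU helpers reach the rest of the kernel only through the
     * pv_mmu_ops table; callee-save thunks come from PV_CALLEE_SAVE_REGS_THUNK. */
    static const struct pv_mmu_ops xen_mmu_ops_sketch = {
            .set_pte    = xen_set_pte,
            .set_pte_at = xen_set_pte_at,
            .set_pud    = xen_set_pud,
            .pte_val    = PV_CALLEE_SAVE(xen_pte_val),
            .make_pte   = PV_CALLEE_SAVE(xen_make_pte),
            /* ...remaining hooks filled in the same way... */
    };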
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 7c275f5d0df0..5d43c1f8ada8 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -20,12 +20,6 @@ config XTENSA
20config RWSEM_XCHGADD_ALGORITHM 20config RWSEM_XCHGADD_ALGORITHM
21 def_bool y 21 def_bool y
22 22
23config GENERIC_FIND_NEXT_BIT
24 def_bool y
25
26config GENERIC_FIND_BIT_LE
27 def_bool y
28
29config GENERIC_HWEIGHT 23config GENERIC_HWEIGHT
30 def_bool y 24 def_bool y
31 25
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index 528042c2951e..a6f934f37f1a 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -683,8 +683,10 @@ __SYSCALL(305, sys_ni_syscall, 0)
683__SYSCALL(306, sys_eventfd, 1) 683__SYSCALL(306, sys_eventfd, 1)
684#define __NR_recvmmsg 307 684#define __NR_recvmmsg 307
685__SYSCALL(307, sys_recvmmsg, 5) 685__SYSCALL(307, sys_recvmmsg, 5)
686#define __NR_setns 308
687__SYSCALL(308, sys_setns, 2)
686 688
687#define __NR_syscall_count 308 689#define __NR_syscall_count 309
688 690
689/* 691/*
690 * sysxtensa syscall handler 692 * sysxtensa syscall handler
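The xtensa change wires setns into slot 308 and bumps __NR_syscall_count to 309. The same three-line pattern covers any future addition; the syscall below is purely hypothetical and only shows the shape of an entry:

    /* Hypothetical example of adding one more entry to this table. */
    #define __NR_examplecall                 309
    __SYSCALL(309, sys_examplecall, 1)               /* handler and argument count */

    #define __NR_syscall_count               310     /* always one past the last slot */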
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 07371cfdfae6..bcaf16ee6ad1 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -30,10 +30,8 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
30 30
31static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *, 31static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
32 struct cgroup *); 32 struct cgroup *);
33static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *, 33static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
34 struct task_struct *, bool); 34static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
35static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
36 struct cgroup *, struct task_struct *, bool);
37static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *); 35static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
38static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *); 36static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
39 37
@@ -46,8 +44,8 @@ static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
46struct cgroup_subsys blkio_subsys = { 44struct cgroup_subsys blkio_subsys = {
47 .name = "blkio", 45 .name = "blkio",
48 .create = blkiocg_create, 46 .create = blkiocg_create,
49 .can_attach = blkiocg_can_attach, 47 .can_attach_task = blkiocg_can_attach_task,
50 .attach = blkiocg_attach, 48 .attach_task = blkiocg_attach_task,
51 .destroy = blkiocg_destroy, 49 .destroy = blkiocg_destroy,
52 .populate = blkiocg_populate, 50 .populate = blkiocg_populate,
53#ifdef CONFIG_BLK_CGROUP 51#ifdef CONFIG_BLK_CGROUP
@@ -1616,9 +1614,7 @@ done:
1616 * of the main cic data structures. For now we allow a task to change 1614 * of the main cic data structures. For now we allow a task to change
1617 * its cgroup only if it's the only owner of its ioc. 1615 * its cgroup only if it's the only owner of its ioc.
1618 */ 1616 */
1619static int blkiocg_can_attach(struct cgroup_subsys *subsys, 1617static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1620 struct cgroup *cgroup, struct task_struct *tsk,
1621 bool threadgroup)
1622{ 1618{
1623 struct io_context *ioc; 1619 struct io_context *ioc;
1624 int ret = 0; 1620 int ret = 0;
@@ -1633,9 +1629,7 @@ static int blkiocg_can_attach(struct cgroup_subsys *subsys,
1633 return ret; 1629 return ret;
1634} 1630}
1635 1631
1636static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup, 1632static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1637 struct cgroup *prev, struct task_struct *tsk,
1638 bool threadgroup)
1639{ 1633{
1640 struct io_context *ioc; 1634 struct io_context *ioc;
1641 1635
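blk-cgroup moves from the old can_attach/attach pair (which received the subsystem pointer, the previous cgroup and a threadgroup flag) to the per-task can_attach_task/attach_task hooks used elsewhere in this series. A minimal sketch of a subsystem using the per-task callbacks, assuming only the cgroup_subsys fields visible in this hunk (a real subsystem needs the usual id/create/destroy plumbing as well):

    #include <linux/cgroup.h>

    /* Sketch: per-task attach hooks, called once for each task being moved. */
    static int example_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
    {
            return 0;                       /* a nonzero return vetoes the whole attach */
    }

    static void example_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
    {
            /* per-task bookkeeping, e.g. dropping a cached io_context */
    }

    struct cgroup_subsys example_subsys = {
            .name            = "example",
            .can_attach_task = example_can_attach_task,
            .attach_task     = example_attach_task,
    };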
diff --git a/block/blk-core.c b/block/blk-core.c
index c8303e9d919d..d2f8f4049abd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -345,6 +345,7 @@ void blk_put_queue(struct request_queue *q)
345{ 345{
346 kobject_put(&q->kobj); 346 kobject_put(&q->kobj);
347} 347}
348EXPORT_SYMBOL(blk_put_queue);
348 349
349/* 350/*
350 * Note: If a driver supplied the queue lock, it should not zap that lock 351 * Note: If a driver supplied the queue lock, it should not zap that lock
@@ -566,6 +567,7 @@ int blk_get_queue(struct request_queue *q)
566 567
567 return 1; 568 return 1;
568} 569}
570EXPORT_SYMBOL(blk_get_queue);
569 571
570static inline void blk_free_request(struct request_queue *q, struct request *rq) 572static inline void blk_free_request(struct request_queue *q, struct request *rq)
571{ 573{
@@ -1130,7 +1132,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
1130 struct request *req, struct bio *bio) 1132 struct request *req, struct bio *bio)
1131{ 1133{
1132 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1134 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1133 sector_t sector;
1134 1135
1135 if (!ll_front_merge_fn(q, req, bio)) 1136 if (!ll_front_merge_fn(q, req, bio))
1136 return false; 1137 return false;
@@ -1140,8 +1141,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
1140 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1141 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1141 blk_rq_set_mixed_merge(req); 1142 blk_rq_set_mixed_merge(req);
1142 1143
1143 sector = bio->bi_sector;
1144
1145 bio->bi_next = req->bio; 1144 bio->bi_next = req->bio;
1146 req->bio = bio; 1145 req->bio = bio;
1147 1146
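blk-core now exports blk_get_queue() and blk_put_queue(), so a module that keeps a request_queue around can take and drop its reference explicitly (the front-merge hunk simply deletes a variable that was assigned but never read). A hedged usage sketch; the surrounding function is invented, and it assumes the 0-on-success convention implied by the visible fall-through "return 1" above:

    #include <linux/blkdev.h>

    /* Sketch: pin a queue for the duration of some work, then release it. */
    static int example_use_queue(struct request_queue *q)
    {
            if (blk_get_queue(q))
                    return -ENXIO;          /* nonzero: queue is already going away */

            /* ... submit work against q ... */

            blk_put_queue(q);               /* drop the kobject reference again */
            return 0;
    }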
diff --git a/block/genhd.c b/block/genhd.c
index 2dd988723d73..95822ae25cfe 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1728,7 +1728,7 @@ static void disk_add_events(struct gendisk *disk)
1728{ 1728{
1729 struct disk_events *ev; 1729 struct disk_events *ev;
1730 1730
1731 if (!disk->fops->check_events || !(disk->events | disk->async_events)) 1731 if (!disk->fops->check_events)
1732 return; 1732 return;
1733 1733
1734 ev = kzalloc(sizeof(*ev), GFP_KERNEL); 1734 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 7025593a58c8..d74926e0939e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -603,6 +603,10 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
603 if (ret) 603 if (ret)
604 goto err_out; 604 goto err_out;
605 605
606 /* Hard-coded primecell ID instead of plug-n-play */
607 if (dev->periphid != 0)
608 goto skip_probe;
609
606 /* 610 /*
607 * Dynamically calculate the size of the resource 611 * Dynamically calculate the size of the resource
608 * and use this for iomap 612 * and use this for iomap
@@ -643,6 +647,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
643 if (ret) 647 if (ret)
644 goto err_release; 648 goto err_release;
645 649
650 skip_probe:
646 ret = device_add(&dev->dev); 651 ret = device_add(&dev->dev);
647 if (ret) 652 if (ret)
648 goto err_release; 653 goto err_release;
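amba_device_register() now skips reading the PrimeCell CID/PID words whenever the board code has already filled in dev->periphid, so cells that are unmapped or unclocked at registration time can still be described statically. A hedged sketch of such a declaration; the name, addresses, IRQ and ID value are invented for illustration, and only the periphid field matters for this patch:

    #include <linux/amba/bus.h>
    #include <linux/ioport.h>

    /* Hypothetical board-file device with a hard-coded PrimeCell ID:
     * with this change the ID registers are not touched at register time. */
    static struct amba_device example_uart_device = {
            .dev = {
                    .init_name = "example-uart",
            },
            .res = {
                    .start = 0x10009000,
                    .end   = 0x10009fff,
                    .flags = IORESOURCE_MEM,
            },
            .irq      = { 12 },
            .periphid = 0x00041011,         /* matched against the driver's ID table */
    };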
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index ffd8797faf4f..471a04013fe0 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include "bcma_private.h" 8#include "bcma_private.h"
9#include <linux/slab.h>
9#include <linux/bcma/bcma.h> 10#include <linux/bcma/bcma.h>
10#include <linux/pci.h> 11#include <linux/pci.h>
11 12
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index b7f51e4594f8..dba1c32e1ddf 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -35,10 +35,6 @@
35 */ 35 */
36struct brd_device { 36struct brd_device {
37 int brd_number; 37 int brd_number;
38 int brd_refcnt;
39 loff_t brd_offset;
40 loff_t brd_sizelimit;
41 unsigned brd_blocksize;
42 38
43 struct request_queue *brd_queue; 39 struct request_queue *brd_queue;
44 struct gendisk *brd_disk; 40 struct gendisk *brd_disk;
@@ -440,11 +436,11 @@ static int rd_nr;
440int rd_size = CONFIG_BLK_DEV_RAM_SIZE; 436int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
441static int max_part; 437static int max_part;
442static int part_shift; 438static int part_shift;
443module_param(rd_nr, int, 0); 439module_param(rd_nr, int, S_IRUGO);
444MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); 440MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
445module_param(rd_size, int, 0); 441module_param(rd_size, int, S_IRUGO);
446MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); 442MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
447module_param(max_part, int, 0); 443module_param(max_part, int, S_IRUGO);
448MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk"); 444MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
449MODULE_LICENSE("GPL"); 445MODULE_LICENSE("GPL");
450MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); 446MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
@@ -552,7 +548,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
552 struct kobject *kobj; 548 struct kobject *kobj;
553 549
554 mutex_lock(&brd_devices_mutex); 550 mutex_lock(&brd_devices_mutex);
555 brd = brd_init_one(dev & MINORMASK); 551 brd = brd_init_one(MINOR(dev) >> part_shift);
556 kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM); 552 kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
557 mutex_unlock(&brd_devices_mutex); 553 mutex_unlock(&brd_devices_mutex);
558 554
@@ -575,25 +571,39 @@ static int __init brd_init(void)
575 * 571 *
576 * (1) if rd_nr is specified, create that many upfront, and this 572 * (1) if rd_nr is specified, create that many upfront, and this
577 * also becomes a hard limit. 573 * also becomes a hard limit.
578 * (2) if rd_nr is not specified, create 1 rd device on module 574 * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
579 * load, user can further extend brd device by create dev node 575 * (default 16) rd device on module load, user can further
580 * themselves and have kernel automatically instantiate actual 576 * extend brd device by create dev node themselves and have
581 * device on-demand. 577 * kernel automatically instantiate actual device on-demand.
582 */ 578 */
583 579
584 part_shift = 0; 580 part_shift = 0;
585 if (max_part > 0) 581 if (max_part > 0) {
586 part_shift = fls(max_part); 582 part_shift = fls(max_part);
587 583
584 /*
585 * Adjust max_part according to part_shift as it is exported
586 * to user space so that user can decide correct minor number
587 * if [s]he want to create more devices.
588 *
589 * Note that -1 is required because partition 0 is reserved
590 * for the whole disk.
591 */
592 max_part = (1UL << part_shift) - 1;
593 }
594
595 if ((1UL << part_shift) > DISK_MAX_PARTS)
596 return -EINVAL;
597
588 if (rd_nr > 1UL << (MINORBITS - part_shift)) 598 if (rd_nr > 1UL << (MINORBITS - part_shift))
589 return -EINVAL; 599 return -EINVAL;
590 600
591 if (rd_nr) { 601 if (rd_nr) {
592 nr = rd_nr; 602 nr = rd_nr;
593 range = rd_nr; 603 range = rd_nr << part_shift;
594 } else { 604 } else {
595 nr = CONFIG_BLK_DEV_RAM_COUNT; 605 nr = CONFIG_BLK_DEV_RAM_COUNT;
596 range = 1UL << (MINORBITS - part_shift); 606 range = 1UL << MINORBITS;
597 } 607 }
598 608
599 if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) 609 if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
@@ -632,7 +642,7 @@ static void __exit brd_exit(void)
632 unsigned long range; 642 unsigned long range;
633 struct brd_device *brd, *next; 643 struct brd_device *brd, *next;
634 644
635 range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift); 645 range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;
636 646
637 list_for_each_entry_safe(brd, next, &brd_devices, brd_list) 647 list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
638 brd_del_one(brd); 648 brd_del_one(brd);
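The brd changes mirror the loop ones below: max_part is rounded to a power-of-two window, part_shift records how many minor bits belong to partitions, and both the probe path and the reserved minor range are scaled accordingly. A small hedged sketch of the arithmetic, with illustrative numbers in the comments:

    #include <linux/bitops.h>
    #include <linux/kdev_t.h>

    /* Sketch of the minor-number split used above. */
    static int example_minor_to_disk(dev_t dev, int max_part)
    {
            int part_shift = max_part > 0 ? fls(max_part) : 0;  /* minor bits per disk */

            /* e.g. max_part = 15 -> part_shift = 4, so minor 37 is disk 2, partition 5 */
            return MINOR(dev) >> part_shift;
    }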
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c59a672a3de0..76c8da78212b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1540,9 +1540,9 @@ static const struct block_device_operations lo_fops = {
1540 * And now the modules code and kernel interface. 1540 * And now the modules code and kernel interface.
1541 */ 1541 */
1542static int max_loop; 1542static int max_loop;
1543module_param(max_loop, int, 0); 1543module_param(max_loop, int, S_IRUGO);
1544MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); 1544MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
1545module_param(max_part, int, 0); 1545module_param(max_part, int, S_IRUGO);
1546MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); 1546MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
1547MODULE_LICENSE("GPL"); 1547MODULE_LICENSE("GPL");
1548MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); 1548MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@@ -1688,9 +1688,20 @@ static int __init loop_init(void)
1688 */ 1688 */
1689 1689
1690 part_shift = 0; 1690 part_shift = 0;
1691 if (max_part > 0) 1691 if (max_part > 0) {
1692 part_shift = fls(max_part); 1692 part_shift = fls(max_part);
1693 1693
1694 /*
1695 * Adjust max_part according to part_shift as it is exported
1696 * to user space so that user can decide correct minor number
1697 * if [s]he want to create more devices.
1698 *
1699 * Note that -1 is required because partition 0 is reserved
1700 * for the whole disk.
1701 */
1702 max_part = (1UL << part_shift) - 1;
1703 }
1704
1694 if ((1UL << part_shift) > DISK_MAX_PARTS) 1705 if ((1UL << part_shift) > DISK_MAX_PARTS)
1695 return -EINVAL; 1706 return -EINVAL;
1696 1707
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 38223e93aa98..58c0e6387cf7 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -36,6 +36,7 @@
36#include <asm/system.h> 36#include <asm/system.h>
37#include <linux/poll.h> 37#include <linux/poll.h>
38#include <linux/sched.h> 38#include <linux/sched.h>
39#include <linux/seq_file.h>
39#include <linux/spinlock.h> 40#include <linux/spinlock.h>
40#include <linux/mutex.h> 41#include <linux/mutex.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
@@ -1896,102 +1897,128 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
1896EXPORT_SYMBOL(ipmi_request_supply_msgs); 1897EXPORT_SYMBOL(ipmi_request_supply_msgs);
1897 1898
1898#ifdef CONFIG_PROC_FS 1899#ifdef CONFIG_PROC_FS
1899static int ipmb_file_read_proc(char *page, char **start, off_t off, 1900static int smi_ipmb_proc_show(struct seq_file *m, void *v)
1900 int count, int *eof, void *data)
1901{ 1901{
1902 char *out = (char *) page; 1902 ipmi_smi_t intf = m->private;
1903 ipmi_smi_t intf = data;
1904 int i; 1903 int i;
1905 int rv = 0;
1906 1904
1907 for (i = 0; i < IPMI_MAX_CHANNELS; i++) 1905 seq_printf(m, "%x", intf->channels[0].address);
1908 rv += sprintf(out+rv, "%x ", intf->channels[i].address); 1906 for (i = 1; i < IPMI_MAX_CHANNELS; i++)
1909 out[rv-1] = '\n'; /* Replace the final space with a newline */ 1907 seq_printf(m, " %x", intf->channels[i].address);
1910 out[rv] = '\0'; 1908 return seq_putc(m, '\n');
1911 rv++;
1912 return rv;
1913} 1909}
1914 1910
1915static int version_file_read_proc(char *page, char **start, off_t off, 1911static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
1916 int count, int *eof, void *data)
1917{ 1912{
1918 char *out = (char *) page; 1913 return single_open(file, smi_ipmb_proc_show, PDE(inode)->data);
1919 ipmi_smi_t intf = data; 1914}
1920 1915
1921 return sprintf(out, "%u.%u\n", 1916static const struct file_operations smi_ipmb_proc_ops = {
1917 .open = smi_ipmb_proc_open,
1918 .read = seq_read,
1919 .llseek = seq_lseek,
1920 .release = single_release,
1921};
1922
1923static int smi_version_proc_show(struct seq_file *m, void *v)
1924{
1925 ipmi_smi_t intf = m->private;
1926
1927 return seq_printf(m, "%u.%u\n",
1922 ipmi_version_major(&intf->bmc->id), 1928 ipmi_version_major(&intf->bmc->id),
1923 ipmi_version_minor(&intf->bmc->id)); 1929 ipmi_version_minor(&intf->bmc->id));
1924} 1930}
1925 1931
1926static int stat_file_read_proc(char *page, char **start, off_t off, 1932static int smi_version_proc_open(struct inode *inode, struct file *file)
1927 int count, int *eof, void *data)
1928{ 1933{
1929 char *out = (char *) page; 1934 return single_open(file, smi_version_proc_show, PDE(inode)->data);
1930 ipmi_smi_t intf = data; 1935}
1936
1937static const struct file_operations smi_version_proc_ops = {
1938 .open = smi_version_proc_open,
1939 .read = seq_read,
1940 .llseek = seq_lseek,
1941 .release = single_release,
1942};
1931 1943
1932 out += sprintf(out, "sent_invalid_commands: %u\n", 1944static int smi_stats_proc_show(struct seq_file *m, void *v)
1945{
1946 ipmi_smi_t intf = m->private;
1947
1948 seq_printf(m, "sent_invalid_commands: %u\n",
1933 ipmi_get_stat(intf, sent_invalid_commands)); 1949 ipmi_get_stat(intf, sent_invalid_commands));
1934 out += sprintf(out, "sent_local_commands: %u\n", 1950 seq_printf(m, "sent_local_commands: %u\n",
1935 ipmi_get_stat(intf, sent_local_commands)); 1951 ipmi_get_stat(intf, sent_local_commands));
1936 out += sprintf(out, "handled_local_responses: %u\n", 1952 seq_printf(m, "handled_local_responses: %u\n",
1937 ipmi_get_stat(intf, handled_local_responses)); 1953 ipmi_get_stat(intf, handled_local_responses));
1938 out += sprintf(out, "unhandled_local_responses: %u\n", 1954 seq_printf(m, "unhandled_local_responses: %u\n",
1939 ipmi_get_stat(intf, unhandled_local_responses)); 1955 ipmi_get_stat(intf, unhandled_local_responses));
1940 out += sprintf(out, "sent_ipmb_commands: %u\n", 1956 seq_printf(m, "sent_ipmb_commands: %u\n",
1941 ipmi_get_stat(intf, sent_ipmb_commands)); 1957 ipmi_get_stat(intf, sent_ipmb_commands));
1942 out += sprintf(out, "sent_ipmb_command_errs: %u\n", 1958 seq_printf(m, "sent_ipmb_command_errs: %u\n",
1943 ipmi_get_stat(intf, sent_ipmb_command_errs)); 1959 ipmi_get_stat(intf, sent_ipmb_command_errs));
1944 out += sprintf(out, "retransmitted_ipmb_commands: %u\n", 1960 seq_printf(m, "retransmitted_ipmb_commands: %u\n",
1945 ipmi_get_stat(intf, retransmitted_ipmb_commands)); 1961 ipmi_get_stat(intf, retransmitted_ipmb_commands));
1946 out += sprintf(out, "timed_out_ipmb_commands: %u\n", 1962 seq_printf(m, "timed_out_ipmb_commands: %u\n",
1947 ipmi_get_stat(intf, timed_out_ipmb_commands)); 1963 ipmi_get_stat(intf, timed_out_ipmb_commands));
1948 out += sprintf(out, "timed_out_ipmb_broadcasts: %u\n", 1964 seq_printf(m, "timed_out_ipmb_broadcasts: %u\n",
1949 ipmi_get_stat(intf, timed_out_ipmb_broadcasts)); 1965 ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
1950 out += sprintf(out, "sent_ipmb_responses: %u\n", 1966 seq_printf(m, "sent_ipmb_responses: %u\n",
1951 ipmi_get_stat(intf, sent_ipmb_responses)); 1967 ipmi_get_stat(intf, sent_ipmb_responses));
1952 out += sprintf(out, "handled_ipmb_responses: %u\n", 1968 seq_printf(m, "handled_ipmb_responses: %u\n",
1953 ipmi_get_stat(intf, handled_ipmb_responses)); 1969 ipmi_get_stat(intf, handled_ipmb_responses));
1954 out += sprintf(out, "invalid_ipmb_responses: %u\n", 1970 seq_printf(m, "invalid_ipmb_responses: %u\n",
1955 ipmi_get_stat(intf, invalid_ipmb_responses)); 1971 ipmi_get_stat(intf, invalid_ipmb_responses));
1956 out += sprintf(out, "unhandled_ipmb_responses: %u\n", 1972 seq_printf(m, "unhandled_ipmb_responses: %u\n",
1957 ipmi_get_stat(intf, unhandled_ipmb_responses)); 1973 ipmi_get_stat(intf, unhandled_ipmb_responses));
1958 out += sprintf(out, "sent_lan_commands: %u\n", 1974 seq_printf(m, "sent_lan_commands: %u\n",
1959 ipmi_get_stat(intf, sent_lan_commands)); 1975 ipmi_get_stat(intf, sent_lan_commands));
1960 out += sprintf(out, "sent_lan_command_errs: %u\n", 1976 seq_printf(m, "sent_lan_command_errs: %u\n",
1961 ipmi_get_stat(intf, sent_lan_command_errs)); 1977 ipmi_get_stat(intf, sent_lan_command_errs));
1962 out += sprintf(out, "retransmitted_lan_commands: %u\n", 1978 seq_printf(m, "retransmitted_lan_commands: %u\n",
1963 ipmi_get_stat(intf, retransmitted_lan_commands)); 1979 ipmi_get_stat(intf, retransmitted_lan_commands));
1964 out += sprintf(out, "timed_out_lan_commands: %u\n", 1980 seq_printf(m, "timed_out_lan_commands: %u\n",
1965 ipmi_get_stat(intf, timed_out_lan_commands)); 1981 ipmi_get_stat(intf, timed_out_lan_commands));
1966 out += sprintf(out, "sent_lan_responses: %u\n", 1982 seq_printf(m, "sent_lan_responses: %u\n",
1967 ipmi_get_stat(intf, sent_lan_responses)); 1983 ipmi_get_stat(intf, sent_lan_responses));
1968 out += sprintf(out, "handled_lan_responses: %u\n", 1984 seq_printf(m, "handled_lan_responses: %u\n",
1969 ipmi_get_stat(intf, handled_lan_responses)); 1985 ipmi_get_stat(intf, handled_lan_responses));
1970 out += sprintf(out, "invalid_lan_responses: %u\n", 1986 seq_printf(m, "invalid_lan_responses: %u\n",
1971 ipmi_get_stat(intf, invalid_lan_responses)); 1987 ipmi_get_stat(intf, invalid_lan_responses));
1972 out += sprintf(out, "unhandled_lan_responses: %u\n", 1988 seq_printf(m, "unhandled_lan_responses: %u\n",
1973 ipmi_get_stat(intf, unhandled_lan_responses)); 1989 ipmi_get_stat(intf, unhandled_lan_responses));
1974 out += sprintf(out, "handled_commands: %u\n", 1990 seq_printf(m, "handled_commands: %u\n",
1975 ipmi_get_stat(intf, handled_commands)); 1991 ipmi_get_stat(intf, handled_commands));
1976 out += sprintf(out, "invalid_commands: %u\n", 1992 seq_printf(m, "invalid_commands: %u\n",
1977 ipmi_get_stat(intf, invalid_commands)); 1993 ipmi_get_stat(intf, invalid_commands));
1978 out += sprintf(out, "unhandled_commands: %u\n", 1994 seq_printf(m, "unhandled_commands: %u\n",
1979 ipmi_get_stat(intf, unhandled_commands)); 1995 ipmi_get_stat(intf, unhandled_commands));
1980 out += sprintf(out, "invalid_events: %u\n", 1996 seq_printf(m, "invalid_events: %u\n",
1981 ipmi_get_stat(intf, invalid_events)); 1997 ipmi_get_stat(intf, invalid_events));
1982 out += sprintf(out, "events: %u\n", 1998 seq_printf(m, "events: %u\n",
1983 ipmi_get_stat(intf, events)); 1999 ipmi_get_stat(intf, events));
1984 out += sprintf(out, "failed rexmit LAN msgs: %u\n", 2000 seq_printf(m, "failed rexmit LAN msgs: %u\n",
1985 ipmi_get_stat(intf, dropped_rexmit_lan_commands)); 2001 ipmi_get_stat(intf, dropped_rexmit_lan_commands));
1986 out += sprintf(out, "failed rexmit IPMB msgs: %u\n", 2002 seq_printf(m, "failed rexmit IPMB msgs: %u\n",
1987 ipmi_get_stat(intf, dropped_rexmit_ipmb_commands)); 2003 ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
2004 return 0;
2005}
1988 2006
1989 return (out - ((char *) page)); 2007static int smi_stats_proc_open(struct inode *inode, struct file *file)
2008{
2009 return single_open(file, smi_stats_proc_show, PDE(inode)->data);
1990} 2010}
2011
2012static const struct file_operations smi_stats_proc_ops = {
2013 .open = smi_stats_proc_open,
2014 .read = seq_read,
2015 .llseek = seq_lseek,
2016 .release = single_release,
2017};
1991#endif /* CONFIG_PROC_FS */ 2018#endif /* CONFIG_PROC_FS */
1992 2019
1993int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, 2020int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1994 read_proc_t *read_proc, 2021 const struct file_operations *proc_ops,
1995 void *data) 2022 void *data)
1996{ 2023{
1997 int rv = 0; 2024 int rv = 0;
@@ -2010,15 +2037,12 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
2010 } 2037 }
2011 strcpy(entry->name, name); 2038 strcpy(entry->name, name);
2012 2039
2013 file = create_proc_entry(name, 0, smi->proc_dir); 2040 file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
2014 if (!file) { 2041 if (!file) {
2015 kfree(entry->name); 2042 kfree(entry->name);
2016 kfree(entry); 2043 kfree(entry);
2017 rv = -ENOMEM; 2044 rv = -ENOMEM;
2018 } else { 2045 } else {
2019 file->data = data;
2020 file->read_proc = read_proc;
2021
2022 mutex_lock(&smi->proc_entry_lock); 2046 mutex_lock(&smi->proc_entry_lock);
2023 /* Stick it on the list. */ 2047 /* Stick it on the list. */
2024 entry->next = smi->proc_entries; 2048 entry->next = smi->proc_entries;
@@ -2043,17 +2067,17 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
2043 2067
2044 if (rv == 0) 2068 if (rv == 0)
2045 rv = ipmi_smi_add_proc_entry(smi, "stats", 2069 rv = ipmi_smi_add_proc_entry(smi, "stats",
2046 stat_file_read_proc, 2070 &smi_stats_proc_ops,
2047 smi); 2071 smi);
2048 2072
2049 if (rv == 0) 2073 if (rv == 0)
2050 rv = ipmi_smi_add_proc_entry(smi, "ipmb", 2074 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
2051 ipmb_file_read_proc, 2075 &smi_ipmb_proc_ops,
2052 smi); 2076 smi);
2053 2077
2054 if (rv == 0) 2078 if (rv == 0)
2055 rv = ipmi_smi_add_proc_entry(smi, "version", 2079 rv = ipmi_smi_add_proc_entry(smi, "version",
2056 version_file_read_proc, 2080 &smi_version_proc_ops,
2057 smi); 2081 smi);
2058#endif /* CONFIG_PROC_FS */ 2082#endif /* CONFIG_PROC_FS */
2059 2083
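All three IPMI proc files move from the old read_proc callbacks (hand-rolled sprintf into a page buffer) to seq_file via single_open(), and registration switches from create_proc_entry() plus manual field poking to proc_create_data(). The same boilerplate fits any small read-only proc file; a condensed template with an invented name:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    /* Template: one-shot, read-only proc file backed by seq_file. */
    static int example_proc_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "value: %d\n", 42);       /* emit everything in one pass */
            return 0;
    }

    static int example_proc_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_proc_show, PDE(inode)->data);
    }

    static const struct file_operations example_proc_ops = {
            .open    = example_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* Registration: the last argument comes back as PDE(inode)->data in ->open():
     * proc_create_data("example", 0444, parent_dir, &example_proc_ops, my_data); */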
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 64c6b8530615..9397ab49b72e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -43,6 +43,7 @@
43#include <linux/moduleparam.h> 43#include <linux/moduleparam.h>
44#include <asm/system.h> 44#include <asm/system.h>
45#include <linux/sched.h> 45#include <linux/sched.h>
46#include <linux/seq_file.h>
46#include <linux/timer.h> 47#include <linux/timer.h>
47#include <linux/errno.h> 48#include <linux/errno.h>
48#include <linux/spinlock.h> 49#include <linux/spinlock.h>
@@ -2805,54 +2806,73 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2805 return rv; 2806 return rv;
2806} 2807}
2807 2808
2808static int type_file_read_proc(char *page, char **start, off_t off, 2809static int smi_type_proc_show(struct seq_file *m, void *v)
2809 int count, int *eof, void *data)
2810{ 2810{
2811 struct smi_info *smi = data; 2811 struct smi_info *smi = m->private;
2812 2812
2813 return sprintf(page, "%s\n", si_to_str[smi->si_type]); 2813 return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
2814} 2814}
2815 2815
2816static int stat_file_read_proc(char *page, char **start, off_t off, 2816static int smi_type_proc_open(struct inode *inode, struct file *file)
2817 int count, int *eof, void *data)
2818{ 2817{
2819 char *out = (char *) page; 2818 return single_open(file, smi_type_proc_show, PDE(inode)->data);
2820 struct smi_info *smi = data; 2819}
2820
2821static const struct file_operations smi_type_proc_ops = {
2822 .open = smi_type_proc_open,
2823 .read = seq_read,
2824 .llseek = seq_lseek,
2825 .release = single_release,
2826};
2827
2828static int smi_si_stats_proc_show(struct seq_file *m, void *v)
2829{
2830 struct smi_info *smi = m->private;
2821 2831
2822 out += sprintf(out, "interrupts_enabled: %d\n", 2832 seq_printf(m, "interrupts_enabled: %d\n",
2823 smi->irq && !smi->interrupt_disabled); 2833 smi->irq && !smi->interrupt_disabled);
2824 out += sprintf(out, "short_timeouts: %u\n", 2834 seq_printf(m, "short_timeouts: %u\n",
2825 smi_get_stat(smi, short_timeouts)); 2835 smi_get_stat(smi, short_timeouts));
2826 out += sprintf(out, "long_timeouts: %u\n", 2836 seq_printf(m, "long_timeouts: %u\n",
2827 smi_get_stat(smi, long_timeouts)); 2837 smi_get_stat(smi, long_timeouts));
2828 out += sprintf(out, "idles: %u\n", 2838 seq_printf(m, "idles: %u\n",
2829 smi_get_stat(smi, idles)); 2839 smi_get_stat(smi, idles));
2830 out += sprintf(out, "interrupts: %u\n", 2840 seq_printf(m, "interrupts: %u\n",
2831 smi_get_stat(smi, interrupts)); 2841 smi_get_stat(smi, interrupts));
2832 out += sprintf(out, "attentions: %u\n", 2842 seq_printf(m, "attentions: %u\n",
2833 smi_get_stat(smi, attentions)); 2843 smi_get_stat(smi, attentions));
2834 out += sprintf(out, "flag_fetches: %u\n", 2844 seq_printf(m, "flag_fetches: %u\n",
2835 smi_get_stat(smi, flag_fetches)); 2845 smi_get_stat(smi, flag_fetches));
2836 out += sprintf(out, "hosed_count: %u\n", 2846 seq_printf(m, "hosed_count: %u\n",
2837 smi_get_stat(smi, hosed_count)); 2847 smi_get_stat(smi, hosed_count));
2838 out += sprintf(out, "complete_transactions: %u\n", 2848 seq_printf(m, "complete_transactions: %u\n",
2839 smi_get_stat(smi, complete_transactions)); 2849 smi_get_stat(smi, complete_transactions));
2840 out += sprintf(out, "events: %u\n", 2850 seq_printf(m, "events: %u\n",
2841 smi_get_stat(smi, events)); 2851 smi_get_stat(smi, events));
2842 out += sprintf(out, "watchdog_pretimeouts: %u\n", 2852 seq_printf(m, "watchdog_pretimeouts: %u\n",
2843 smi_get_stat(smi, watchdog_pretimeouts)); 2853 smi_get_stat(smi, watchdog_pretimeouts));
2844 out += sprintf(out, "incoming_messages: %u\n", 2854 seq_printf(m, "incoming_messages: %u\n",
2845 smi_get_stat(smi, incoming_messages)); 2855 smi_get_stat(smi, incoming_messages));
2856 return 0;
2857}
2846 2858
2847 return out - page; 2859static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
2860{
2861 return single_open(file, smi_si_stats_proc_show, PDE(inode)->data);
2848} 2862}
2849 2863
2850static int param_read_proc(char *page, char **start, off_t off, 2864static const struct file_operations smi_si_stats_proc_ops = {
2851 int count, int *eof, void *data) 2865 .open = smi_si_stats_proc_open,
2866 .read = seq_read,
2867 .llseek = seq_lseek,
2868 .release = single_release,
2869};
2870
2871static int smi_params_proc_show(struct seq_file *m, void *v)
2852{ 2872{
2853 struct smi_info *smi = data; 2873 struct smi_info *smi = m->private;
2854 2874
2855 return sprintf(page, 2875 return seq_printf(m,
2856 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", 2876 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2857 si_to_str[smi->si_type], 2877 si_to_str[smi->si_type],
2858 addr_space_to_str[smi->io.addr_type], 2878 addr_space_to_str[smi->io.addr_type],
@@ -2864,6 +2884,18 @@ static int param_read_proc(char *page, char **start, off_t off,
2864 smi->slave_addr); 2884 smi->slave_addr);
2865} 2885}
2866 2886
2887static int smi_params_proc_open(struct inode *inode, struct file *file)
2888{
2889 return single_open(file, smi_params_proc_show, PDE(inode)->data);
2890}
2891
2892static const struct file_operations smi_params_proc_ops = {
2893 .open = smi_params_proc_open,
2894 .read = seq_read,
2895 .llseek = seq_lseek,
2896 .release = single_release,
2897};
2898
2867/* 2899/*
2868 * oem_data_avail_to_receive_msg_avail 2900 * oem_data_avail_to_receive_msg_avail
2869 * @info - smi_info structure with msg_flags set 2901 * @info - smi_info structure with msg_flags set
@@ -3257,7 +3289,7 @@ static int try_smi_init(struct smi_info *new_smi)
3257 } 3289 }
3258 3290
3259 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", 3291 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
3260 type_file_read_proc, 3292 &smi_type_proc_ops,
3261 new_smi); 3293 new_smi);
3262 if (rv) { 3294 if (rv) {
3263 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); 3295 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
@@ -3265,7 +3297,7 @@ static int try_smi_init(struct smi_info *new_smi)
3265 } 3297 }
3266 3298
3267 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", 3299 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
3268 stat_file_read_proc, 3300 &smi_si_stats_proc_ops,
3269 new_smi); 3301 new_smi);
3270 if (rv) { 3302 if (rv) {
3271 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); 3303 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
@@ -3273,7 +3305,7 @@ static int try_smi_init(struct smi_info *new_smi)
3273 } 3305 }
3274 3306
3275 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", 3307 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
3276 param_read_proc, 3308 &smi_params_proc_ops,
3277 new_smi); 3309 new_smi);
3278 if (rv) { 3310 if (rv) {
3279 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); 3311 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 966a95bc974b..25d139c9dbed 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -271,14 +271,13 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
271 pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 271 pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
272 vdata_size = sizeof(struct vma_data) + pages * sizeof(long); 272 vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
273 if (vdata_size <= PAGE_SIZE) 273 if (vdata_size <= PAGE_SIZE)
274 vdata = kmalloc(vdata_size, GFP_KERNEL); 274 vdata = kzalloc(vdata_size, GFP_KERNEL);
275 else { 275 else {
276 vdata = vmalloc(vdata_size); 276 vdata = vzalloc(vdata_size);
277 flags = VMD_VMALLOCED; 277 flags = VMD_VMALLOCED;
278 } 278 }
279 if (!vdata) 279 if (!vdata)
280 return -ENOMEM; 280 return -ENOMEM;
281 memset(vdata, 0, vdata_size);
282 281
283 vdata->vm_start = vma->vm_start; 282 vdata->vm_start = vma->vm_start;
284 vdata->vm_end = vma->vm_end; 283 vdata->vm_end = vma->vm_end;
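mspec drops the trailing memset() by allocating zeroed memory up front: kzalloc() for the small case and vzalloc() once the allocation exceeds a page. A minimal sketch of the same size-dependent pattern, with an invented wrapper name:

    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Sketch: choose a zeroed allocator based on size, as the hunk above does. */
    static void *example_alloc_zeroed(size_t size, int *vmallocked)
    {
            void *p;

            if (size <= PAGE_SIZE) {
                    p = kzalloc(size, GFP_KERNEL);  /* slab allocation, returned zeroed */
                    *vmallocked = 0;
            } else {
                    p = vzalloc(size);              /* vmalloc allocation, returned zeroed */
                    *vmallocked = 1;
            }
            return p;                               /* NULL on failure */
    }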
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index f176dbaeb15a..3fcf80ff12f2 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -457,6 +457,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
457 return -ENODEV; 457 return -ENODEV;
458 458
459 modes = port->modes; 459 modes = port->modes;
460 parport_put_port(port);
460 if (copy_to_user (argp, &modes, sizeof (modes))) { 461 if (copy_to_user (argp, &modes, sizeof (modes))) {
461 return -EFAULT; 462 return -EFAULT;
462 } 463 }
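The ppdev fix releases the port reference taken by the lookup before copying the snapshot of port->modes to user space, so the PPGETMODES path no longer leaks a parport reference and the -EFAULT branch needs no extra cleanup. A hedged sketch of the balanced get/put pattern (the lookup helper is assumed to be the usual parport_find_number()):

    #include <linux/parport.h>
    #include <linux/uaccess.h>

    /* Sketch: look up a port, snapshot its modes, release the reference. */
    static int example_get_modes(int portnum, unsigned int __user *argp)
    {
            struct parport *port = parport_find_number(portnum);    /* takes a reference */
            unsigned int modes;

            if (!port)
                    return -ENODEV;

            modes = port->modes;
            parport_put_port(port);         /* release before touching user memory */

            return copy_to_user(argp, &modes, sizeof(modes)) ? -EFAULT : 0;
    }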
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index cace0a7b707a..e47e73bbbcc5 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -19,7 +19,7 @@
19#include <linux/edac.h> 19#include <linux/edac.h>
20#include "edac_core.h" 20#include "edac_core.h"
21 21
22#define AMD76X_REVISION " Ver: 2.0.2 " __DATE__ 22#define AMD76X_REVISION " Ver: 2.0.2"
23#define EDAC_MOD_STR "amd76x_edac" 23#define EDAC_MOD_STR "amd76x_edac"
24 24
25#define amd76x_printk(level, fmt, arg...) \ 25#define amd76x_printk(level, fmt, arg...) \
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 35b78d04bbfa..ddd890052ce2 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -33,7 +33,7 @@
33#include "edac_module.h" 33#include "edac_module.h"
34#include "amd8111_edac.h" 34#include "amd8111_edac.h"
35 35
36#define AMD8111_EDAC_REVISION " Ver: 1.0.0 " __DATE__ 36#define AMD8111_EDAC_REVISION " Ver: 1.0.0"
37#define AMD8111_EDAC_MOD_STR "amd8111_edac" 37#define AMD8111_EDAC_MOD_STR "amd8111_edac"
38 38
39#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 39#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index b432d60c622a..a5c680561c73 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -33,7 +33,7 @@
33#include "edac_module.h" 33#include "edac_module.h"
34#include "amd8131_edac.h" 34#include "amd8131_edac.h"
35 35
36#define AMD8131_EDAC_REVISION " Ver: 1.0.0 " __DATE__ 36#define AMD8131_EDAC_REVISION " Ver: 1.0.0"
37#define AMD8131_EDAC_MOD_STR "amd8131_edac" 37#define AMD8131_EDAC_MOD_STR "amd8131_edac"
38 38
39/* Wrapper functions for accessing PCI configuration space */ 39/* Wrapper functions for accessing PCI configuration space */
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 837ad8f85b48..a687a0d16962 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -30,7 +30,7 @@
30#include "edac_core.h" 30#include "edac_core.h"
31#include "edac_module.h" 31#include "edac_module.h"
32 32
33#define CPC925_EDAC_REVISION " Ver: 1.0.0 " __DATE__ 33#define CPC925_EDAC_REVISION " Ver: 1.0.0"
34#define CPC925_EDAC_MOD_STR "cpc925_edac" 34#define CPC925_EDAC_MOD_STR "cpc925_edac"
35 35
36#define cpc925_printk(level, fmt, arg...) \ 36#define cpc925_printk(level, fmt, arg...) \
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index ec302d426589..1af531a11d21 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -24,7 +24,7 @@
24#include <linux/edac.h> 24#include <linux/edac.h>
25#include "edac_core.h" 25#include "edac_core.h"
26 26
27#define E752X_REVISION " Ver: 2.0.2 " __DATE__ 27#define E752X_REVISION " Ver: 2.0.2"
28#define EDAC_MOD_STR "e752x_edac" 28#define EDAC_MOD_STR "e752x_edac"
29 29
30static int report_non_memory_errors; 30static int report_non_memory_errors;
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 1731d7245816..6ffb6d23281f 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -29,7 +29,7 @@
29#include <linux/edac.h> 29#include <linux/edac.h>
30#include "edac_core.h" 30#include "edac_core.h"
31 31
32#define E7XXX_REVISION " Ver: 2.0.2 " __DATE__ 32#define E7XXX_REVISION " Ver: 2.0.2"
33#define EDAC_MOD_STR "e7xxx_edac" 33#define EDAC_MOD_STR "e7xxx_edac"
34 34
35#define e7xxx_printk(level, fmt, arg...) \ 35#define e7xxx_printk(level, fmt, arg...) \
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index eefa3501916b..55b8278bb172 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -421,10 +421,6 @@ struct mem_ctl_info {
421 u32 ce_count; /* Total Correctable Errors for this MC */ 421 u32 ce_count; /* Total Correctable Errors for this MC */
422 unsigned long start_time; /* mci load start time (in jiffies) */ 422 unsigned long start_time; /* mci load start time (in jiffies) */
423 423
424 /* this stuff is for safe removal of mc devices from global list while
425 * NMI handlers may be traversing list
426 */
427 struct rcu_head rcu;
428 struct completion complete; 424 struct completion complete;
429 425
430 /* edac sysfs device control */ 426 /* edac sysfs device control */
@@ -620,10 +616,6 @@ struct edac_device_ctl_info {
620 616
621 unsigned long start_time; /* edac_device load start time (jiffies) */ 617 unsigned long start_time; /* edac_device load start time (jiffies) */
622 618
623 /* these are for safe removal of mc devices from global list while
624 * NMI handlers may be traversing list
625 */
626 struct rcu_head rcu;
627 struct completion removal_complete; 619 struct completion removal_complete;
628 620
629 /* sysfs top name under 'edac' directory 621 /* sysfs top name under 'edac' directory
@@ -722,10 +714,6 @@ struct edac_pci_ctl_info {
722 714
723 unsigned long start_time; /* edac_pci load start time (jiffies) */ 715 unsigned long start_time; /* edac_pci load start time (jiffies) */
724 716
725 /* these are for safe removal of devices from global list while
726 * NMI handlers may be traversing list
727 */
728 struct rcu_head rcu;
729 struct completion complete; 717 struct completion complete;
730 718
731 /* sysfs top name under 'edac' directory 719 /* sysfs top name under 'edac' directory
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index a7408cf86f37..c3f67437afb6 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -346,30 +346,18 @@ fail1:
346} 346}
347 347
348/* 348/*
349 * complete_edac_device_list_del
350 *
351 * callback function when reference count is zero
352 */
353static void complete_edac_device_list_del(struct rcu_head *head)
354{
355 struct edac_device_ctl_info *edac_dev;
356
357 edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
358 INIT_LIST_HEAD(&edac_dev->link);
359}
360
361/*
362 * del_edac_device_from_global_list 349 * del_edac_device_from_global_list
363 *
364 * remove the RCU, setup for a callback call,
365 * then wait for the callback to occur
366 */ 350 */
367static void del_edac_device_from_global_list(struct edac_device_ctl_info 351static void del_edac_device_from_global_list(struct edac_device_ctl_info
368 *edac_device) 352 *edac_device)
369{ 353{
370 list_del_rcu(&edac_device->link); 354 list_del_rcu(&edac_device->link);
371 call_rcu(&edac_device->rcu, complete_edac_device_list_del); 355
372 rcu_barrier(); 356 /* these are for safe removal of devices from global list while
357 * NMI handlers may be traversing list
358 */
359 synchronize_rcu();
360 INIT_LIST_HEAD(&edac_device->link);
373} 361}
374 362
375/* 363/*
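edac_device, edac_mc and edac_pci all replace the call_rcu() callback plus rcu_barrier() dance with a plain synchronize_rcu() before reinitialising the list head: the only deferred work was the re-init itself, so waiting for one grace period inline is simpler and lets the rcu_head members be dropped from the control structures. A hedged sketch of the resulting removal pattern:

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    /* Sketch: remove an entry that lockless (even NMI-context) readers may
     * still be traversing, then make the node safe to reuse. */
    static void example_del_from_global_list(struct list_head *entry)
    {
            list_del_rcu(entry);    /* unlink; concurrent readers can still see it */
            synchronize_rcu();      /* wait until no reader can hold a reference */
            INIT_LIST_HEAD(entry);  /* now safe to reinitialise/reuse the node */
    }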
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 1d8056049072..d69144a09043 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -447,20 +447,16 @@ fail1:
447 return 1; 447 return 1;
448} 448}
449 449
450static void complete_mc_list_del(struct rcu_head *head)
451{
452 struct mem_ctl_info *mci;
453
454 mci = container_of(head, struct mem_ctl_info, rcu);
455 INIT_LIST_HEAD(&mci->link);
456}
457
458static void del_mc_from_global_list(struct mem_ctl_info *mci) 450static void del_mc_from_global_list(struct mem_ctl_info *mci)
459{ 451{
460 atomic_dec(&edac_handlers); 452 atomic_dec(&edac_handlers);
461 list_del_rcu(&mci->link); 453 list_del_rcu(&mci->link);
462 call_rcu(&mci->rcu, complete_mc_list_del); 454
463 rcu_barrier(); 455 /* these are for safe removal of devices from global list while
456 * NMI handlers may be traversing list
457 */
458 synchronize_rcu();
459 INIT_LIST_HEAD(&mci->link);
464} 460}
465 461
466/** 462/**
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index be4b075c3098..5ddaa86d6a6e 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -15,7 +15,7 @@
15#include "edac_core.h" 15#include "edac_core.h"
16#include "edac_module.h" 16#include "edac_module.h"
17 17
18#define EDAC_VERSION "Ver: 2.1.0 " __DATE__ 18#define EDAC_VERSION "Ver: 2.1.0"
19 19
20#ifdef CONFIG_EDAC_DEBUG 20#ifdef CONFIG_EDAC_DEBUG
21/* Values of 0 to 4 will generate output */ 21/* Values of 0 to 4 will generate output */
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index efb5d5650783..2b378207d571 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -164,19 +164,6 @@ fail1:
164} 164}
165 165
166/* 166/*
167 * complete_edac_pci_list_del
168 *
169 * RCU completion callback to indicate item is deleted
170 */
171static void complete_edac_pci_list_del(struct rcu_head *head)
172{
173 struct edac_pci_ctl_info *pci;
174
175 pci = container_of(head, struct edac_pci_ctl_info, rcu);
176 INIT_LIST_HEAD(&pci->link);
177}
178
179/*
180 * del_edac_pci_from_global_list 167 * del_edac_pci_from_global_list
181 * 168 *
182 * remove the PCI control struct from the global list 169 * remove the PCI control struct from the global list
@@ -184,8 +171,12 @@ static void complete_edac_pci_list_del(struct rcu_head *head)
184static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) 171static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
185{ 172{
186 list_del_rcu(&pci->link); 173 list_del_rcu(&pci->link);
187 call_rcu(&pci->rcu, complete_edac_pci_list_del); 174
188 rcu_barrier(); 175 /* these are for safe removal of devices from global list while
176 * NMI handlers may be traversing list
177 */
178 synchronize_rcu();
179 INIT_LIST_HEAD(&pci->link);
189} 180}
190 181
191#if 0 182#if 0
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 87f427c2ce5c..4dc3ac25a422 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -27,7 +27,7 @@
27/* 27/*
28 * Alter this version for the I5000 module when modifications are made 28 * Alter this version for the I5000 module when modifications are made
29 */ 29 */
30#define I5000_REVISION " Ver: 2.0.12 " __DATE__ 30#define I5000_REVISION " Ver: 2.0.12"
31#define EDAC_MOD_STR "i5000_edac" 31#define EDAC_MOD_STR "i5000_edac"
32 32
33#define i5000_printk(level, fmt, arg...) \ 33#define i5000_printk(level, fmt, arg...) \
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 80a465efbae8..74d6ec342afb 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -33,7 +33,7 @@
33/* 33/*
34 * Alter this version for the I5400 module when modifications are made 34 * Alter this version for the I5400 module when modifications are made
35 */ 35 */
36#define I5400_REVISION " Ver: 1.0.0 " __DATE__ 36#define I5400_REVISION " Ver: 1.0.0"
37 37
38#define EDAC_MOD_STR "i5400_edac" 38#define EDAC_MOD_STR "i5400_edac"
39 39
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 363cc1602944..a76fe8366b68 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -31,7 +31,7 @@
31/* 31/*
32 * Alter this version for the I7300 module when modifications are made 32 * Alter this version for the I7300 module when modifications are made
33 */ 33 */
34#define I7300_REVISION " Ver: 1.0.0 " __DATE__ 34#define I7300_REVISION " Ver: 1.0.0"
35 35
36#define EDAC_MOD_STR "i7300_edac" 36#define EDAC_MOD_STR "i7300_edac"
37 37
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 465cbc25149f..04f1e7ce02b1 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -59,7 +59,7 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
59/* 59/*
60 * Alter this version for the module when modifications are made 60 * Alter this version for the module when modifications are made
61 */ 61 */
62#define I7CORE_REVISION " Ver: 1.0.0 " __DATE__ 62#define I7CORE_REVISION " Ver: 1.0.0"
63#define EDAC_MOD_STR "i7core_edac" 63#define EDAC_MOD_STR "i7core_edac"
64 64
65/* 65/*
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index b8a95cf50718..931a05775049 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -16,7 +16,7 @@
16#include <linux/edac.h> 16#include <linux/edac.h>
17#include "edac_core.h" 17#include "edac_core.h"
18 18
19#define I82860_REVISION " Ver: 2.0.2 " __DATE__ 19#define I82860_REVISION " Ver: 2.0.2"
20#define EDAC_MOD_STR "i82860_edac" 20#define EDAC_MOD_STR "i82860_edac"
21 21
22#define i82860_printk(level, fmt, arg...) \ 22#define i82860_printk(level, fmt, arg...) \
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index b2fd1e899142..33864c63c684 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -20,7 +20,7 @@
20#include <linux/edac.h> 20#include <linux/edac.h>
21#include "edac_core.h" 21#include "edac_core.h"
22 22
23#define I82875P_REVISION " Ver: 2.0.2 " __DATE__ 23#define I82875P_REVISION " Ver: 2.0.2"
24#define EDAC_MOD_STR "i82875p_edac" 24#define EDAC_MOD_STR "i82875p_edac"
25 25
26#define i82875p_printk(level, fmt, arg...) \ 26#define i82875p_printk(level, fmt, arg...) \
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 92e65e7038e9..a5da732fe5b2 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -16,7 +16,7 @@
16#include <linux/edac.h> 16#include <linux/edac.h>
17#include "edac_core.h" 17#include "edac_core.h"
18 18
19#define I82975X_REVISION " Ver: 1.0.0 " __DATE__ 19#define I82975X_REVISION " Ver: 1.0.0"
20#define EDAC_MOD_STR "i82975x_edac" 20#define EDAC_MOD_STR "i82975x_edac"
21 21
22#define i82975x_printk(level, fmt, arg...) \ 22#define i82975x_printk(level, fmt, arg...) \
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index cb24df839460..932016f2cf06 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -11,7 +11,7 @@
11#ifndef _MPC85XX_EDAC_H_ 11#ifndef _MPC85XX_EDAC_H_
12#define _MPC85XX_EDAC_H_ 12#define _MPC85XX_EDAC_H_
13 13
14#define MPC85XX_REVISION " Ver: 2.0.0 " __DATE__ 14#define MPC85XX_REVISION " Ver: 2.0.0"
15#define EDAC_MOD_STR "MPC85xx_edac" 15#define EDAC_MOD_STR "MPC85xx_edac"
16 16
17#define mpc85xx_printk(level, fmt, arg...) \ 17#define mpc85xx_printk(level, fmt, arg...) \
diff --git a/drivers/edac/mv64x60_edac.h b/drivers/edac/mv64x60_edac.h
index e042e2daa8f4..c7f209c92a1a 100644
--- a/drivers/edac/mv64x60_edac.h
+++ b/drivers/edac/mv64x60_edac.h
@@ -12,7 +12,7 @@
12#ifndef _MV64X60_EDAC_H_ 12#ifndef _MV64X60_EDAC_H_
13#define _MV64X60_EDAC_H_ 13#define _MV64X60_EDAC_H_
14 14
15#define MV64x60_REVISION " Ver: 2.0.0 " __DATE__ 15#define MV64x60_REVISION " Ver: 2.0.0"
16#define EDAC_MOD_STR "MV64x60_edac" 16#define EDAC_MOD_STR "MV64x60_edac"
17 17
18#define mv64x60_printk(level, fmt, arg...) \ 18#define mv64x60_printk(level, fmt, arg...) \
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index af8e7b1aa290..0de7d8770891 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -113,7 +113,7 @@
113#define EDAC_OPSTATE_UNKNOWN_STR "unknown" 113#define EDAC_OPSTATE_UNKNOWN_STR "unknown"
114 114
115#define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac" 115#define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac"
116#define PPC4XX_EDAC_MODULE_REVISION "v1.0.0 " __DATE__ 116#define PPC4XX_EDAC_MODULE_REVISION "v1.0.0"
117 117
118#define PPC4XX_EDAC_MESSAGE_SIZE 256 118#define PPC4XX_EDAC_MESSAGE_SIZE 256
119 119
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 678513738c33..b153674431f1 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -22,7 +22,7 @@
22#include <linux/edac.h> 22#include <linux/edac.h>
23#include "edac_core.h" 23#include "edac_core.h"
24 24
25#define R82600_REVISION " Ver: 2.0.2 " __DATE__ 25#define R82600_REVISION " Ver: 2.0.2"
26#define EDAC_MOD_STR "r82600_edac" 26#define EDAC_MOD_STR "r82600_edac"
27 27
28#define r82600_printk(level, fmt, arg...) \ 28#define r82600_printk(level, fmt, arg...) \
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 978852afc9dc..592397629ddc 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -454,4 +454,11 @@ config AB8500_GPIO
454 depends on AB8500_CORE && BROKEN 454 depends on AB8500_CORE && BROKEN
455 help 455 help
456 Select this to enable the AB8500 IC GPIO driver 456 Select this to enable the AB8500 IC GPIO driver
457
458config GPIO_TPS65910
459 bool "TPS65910 GPIO"
460 depends on MFD_TPS65910
461 help
462 Select this option to enable GPIO driver for the TPS65910
463 chip family.
457endif 464endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 4182040a3522..b605f8ec6fbe 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -47,3 +47,4 @@ obj-$(CONFIG_GPIO_SX150X) += sx150x.o
47obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o 47obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o
48obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o 48obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o
49obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o 49obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o
50obj-$(CONFIG_GPIO_TPS65910) += tps65910-gpio.o
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/tps65910-gpio.c
new file mode 100644
index 000000000000..8d1ddfdd63eb
--- /dev/null
+++ b/drivers/gpio/tps65910-gpio.c
@@ -0,0 +1,100 @@
1/*
2 * tps65910-gpio.c -- TI TPS6591x
3 *
4 * Copyright 2010 Texas Instruments Inc.
5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
 7 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/errno.h>
19#include <linux/gpio.h>
20#include <linux/i2c.h>
21#include <linux/mfd/tps65910.h>
22
23static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
24{
25 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
26 uint8_t val;
27
28 tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val);
29
30 if (val & GPIO_STS_MASK)
31 return 1;
32
33 return 0;
34}
35
36static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
37 int value)
38{
39 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
40
41 if (value)
42 tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
43 GPIO_SET_MASK);
44 else
45 tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
46 GPIO_SET_MASK);
47}
48
49static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
50 int value)
51{
52 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
53
54 /* Set the initial value */
 55	tps65910_gpio_set(gc, offset, value);
56
57 return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
58 GPIO_CFG_MASK);
59}
60
61static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
62{
63 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
64
65 return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
66 GPIO_CFG_MASK);
67}
68
69void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
70{
71 int ret;
72
73 if (!gpio_base)
74 return;
75
76 tps65910->gpio.owner = THIS_MODULE;
77 tps65910->gpio.label = tps65910->i2c_client->name;
78 tps65910->gpio.dev = tps65910->dev;
79 tps65910->gpio.base = gpio_base;
80
 81	switch (tps65910_chip_id(tps65910)) {
 82	case TPS65910:
 83		tps65910->gpio.ngpio = 6; break;
 84	case TPS65911:
 85		tps65910->gpio.ngpio = 9; break;
 86	default:
 87		return;
 88	}
89 tps65910->gpio.can_sleep = 1;
90
91 tps65910->gpio.direction_input = tps65910_gpio_input;
92 tps65910->gpio.direction_output = tps65910_gpio_output;
93 tps65910->gpio.set = tps65910_gpio_set;
94 tps65910->gpio.get = tps65910_gpio_get;
95
96 ret = gpiochip_add(&tps65910->gpio);
97
98 if (ret)
99 dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
100}
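
As a usage sketch only, once the MFD core has called tps65910_gpio_init() with a valid gpio_base, the PMIC pins appear as ordinary gpiolib GPIOs. The base number and pin role below are made up:

    #include <linux/gpio.h>
    #include <linux/err.h>

    #define BOARD_TPS65910_GPIO_BASE  200   /* hypothetical gpio_base handed to tps65910_gpio_init() */

    static int board_enable_backlight(void)
    {
            int gpio = BOARD_TPS65910_GPIO_BASE + 0;  /* first PMIC GPIO, arbitrary choice */
            int ret;

            ret = gpio_request(gpio, "backlight-en");
            if (ret)
                    return ret;

            /* ends up in tps65910_gpio_output()/_set() above, over I2C */
            ret = gpio_direction_output(gpio, 1);
            if (ret)
                    gpio_free(gpio);
            return ret;
    }

Because accesses go over I2C the driver sets can_sleep, so values should be read or written with the gpio_get_value_cansleep()/gpio_set_value_cansleep() variants from task context.
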
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 9577c432e77f..de3d2465fe24 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -350,6 +350,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
350 350
351static int create_name_attr(struct platform_data *pdata, struct device *dev) 351static int create_name_attr(struct platform_data *pdata, struct device *dev)
352{ 352{
353 sysfs_attr_init(&pdata->name_attr.attr);
353 pdata->name_attr.attr.name = "name"; 354 pdata->name_attr.attr.name = "name";
354 pdata->name_attr.attr.mode = S_IRUGO; 355 pdata->name_attr.attr.mode = S_IRUGO;
355 pdata->name_attr.show = show_name; 356 pdata->name_attr.show = show_name;
@@ -372,6 +373,7 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
372 for (i = 0; i < MAX_ATTRS; i++) { 373 for (i = 0; i < MAX_ATTRS; i++) {
373 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], 374 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
374 attr_no); 375 attr_no);
376 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
375 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; 377 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
376 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; 378 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
377 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; 379 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
@@ -422,7 +424,7 @@ static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
422 } 424 }
423} 425}
424 426
425static int chk_ucode_version(struct platform_device *pdev) 427static int __devinit chk_ucode_version(struct platform_device *pdev)
426{ 428{
427 struct cpuinfo_x86 *c = &cpu_data(pdev->id); 429 struct cpuinfo_x86 *c = &cpu_data(pdev->id);
428 int err; 430 int err;
@@ -509,8 +511,8 @@ static int create_core_data(struct platform_data *pdata,
509 /* 511 /*
510 * Provide a single set of attributes for all HT siblings of a core 512 * Provide a single set of attributes for all HT siblings of a core
511 * to avoid duplicate sensors (the processor ID and core ID of all 513 * to avoid duplicate sensors (the processor ID and core ID of all
512 * HT siblings of a core is the same). 514 * HT siblings of a core are the same).
513 * Skip if a HT sibling of this core is already online. 515 * Skip if a HT sibling of this core is already registered.
514 * This is not an error. 516 * This is not an error.
515 */ 517 */
516 if (pdata->core_data[attr_no] != NULL) 518 if (pdata->core_data[attr_no] != NULL)
@@ -770,10 +772,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
770 coretemp_remove_core(pdata, &pdev->dev, indx); 772 coretemp_remove_core(pdata, &pdev->dev, indx);
771 773
772 /* 774 /*
773 * If a core is taken offline, but a HT sibling of the same core is 775 * If a HT sibling of a core is taken offline, but another HT sibling
774 * still online, register the alternate sibling. This ensures that 776 * of the same core is still online, register the alternate sibling.
775 * exactly one set of attributes is provided as long as at least one 777 * This ensures that exactly one set of attributes is provided as long
776 * HT sibling of a core is online. 778 * as at least one HT sibling of a core is online.
777 */ 779 */
778 for_each_sibling(i, cpu) { 780 for_each_sibling(i, cpu) {
779 if (i != cpu) { 781 if (i != cpu) {
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index 98799bab69ce..354770ed3186 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -707,6 +707,7 @@ do { \
707 struct sensor_device_attribute *a \ 707 struct sensor_device_attribute *a \
708 = &data->_type##s[data->num_##_type##s].attribute; \ 708 = &data->_type##s[data->num_##_type##s].attribute; \
709 BUG_ON(data->num_attributes >= data->max_attributes); \ 709 BUG_ON(data->num_attributes >= data->max_attributes); \
710 sysfs_attr_init(&a->dev_attr.attr); \
710 a->dev_attr.attr.name = _name; \ 711 a->dev_attr.attr.name = _name; \
711 a->dev_attr.attr.mode = _mode; \ 712 a->dev_attr.attr.mode = _mode; \
712 a->dev_attr.show = _show; \ 713 a->dev_attr.show = _show; \
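
Background for the sysfs_attr_init() additions above: dynamically allocated attributes need their lockdep class key initialised before the file is created, otherwise kernels built with CONFIG_DEBUG_LOCK_ALLOC warn when the sysfs file is registered. A minimal sketch with an invented driver structure:

    #include <linux/device.h>
    #include <linux/sysfs.h>
    #include <linux/stat.h>
    #include <linux/slab.h>

    struct demo_data {
            struct device_attribute temp_attr;      /* lives in kzalloc()ed memory */
    };

    static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
    {
            return sprintf(buf, "42\n");
    }

    static int demo_add_attr(struct device *dev, struct demo_data *data)
    {
            /* required for dynamically allocated attributes */
            sysfs_attr_init(&data->temp_attr.attr);
            data->temp_attr.attr.name = "temp1_input";
            data->temp_attr.attr.mode = S_IRUGO;
            data->temp_attr.show = demo_show;

            return device_create_file(dev, &data->temp_attr);
    }
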
diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
index d36a4c09e25d..0bbee7824d78 100644
--- a/drivers/isdn/hardware/eicon/divasfunc.c
+++ b/drivers/isdn/hardware/eicon/divasfunc.c
@@ -113,9 +113,8 @@ void diva_xdi_didd_remove_adapter(int card)
113static void start_dbg(void) 113static void start_dbg(void)
114{ 114{
115 DbgRegister("DIVAS", DRIVERRELEASE_DIVAS, (debugmask) ? debugmask : DBG_DEFAULT); 115 DbgRegister("DIVAS", DRIVERRELEASE_DIVAS, (debugmask) ? debugmask : DBG_DEFAULT);
116 DBG_LOG(("DIVA ISDNXDI BUILD (%s[%s]-%s-%s)", 116 DBG_LOG(("DIVA ISDNXDI BUILD (%s[%s])",
117 DIVA_BUILD, diva_xdi_common_code_build, __DATE__, 117 DIVA_BUILD, diva_xdi_common_code_build))
118 __TIME__))
119} 118}
120 119
121/* 120/*
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index 2d8b4044be36..b2b0c45f32a9 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <linux/i2c.h> 22#include <linux/i2c.h>
23#include <linux/i2c-algo-bit.h>
23#include <linux/init.h> 24#include <linux/init.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/module.h> 26#include <linux/module.h>
@@ -49,11 +50,12 @@
49 50
50#define UNSET (-1U) 51#define UNSET (-1U)
51 52
52#define DM1105_BOARD_NOAUTO UNSET 53#define DM1105_BOARD_NOAUTO UNSET
53#define DM1105_BOARD_UNKNOWN 0 54#define DM1105_BOARD_UNKNOWN 0
54#define DM1105_BOARD_DVBWORLD_2002 1 55#define DM1105_BOARD_DVBWORLD_2002 1
55#define DM1105_BOARD_DVBWORLD_2004 2 56#define DM1105_BOARD_DVBWORLD_2004 2
56#define DM1105_BOARD_AXESS_DM05 3 57#define DM1105_BOARD_AXESS_DM05 3
58#define DM1105_BOARD_UNBRANDED_I2C_ON_GPIO 4
57 59
58/* ----------------------------------------------- */ 60/* ----------------------------------------------- */
59/* 61/*
@@ -157,22 +159,38 @@
157#define DM1105_MAX 0x04 159#define DM1105_MAX 0x04
158 160
159#define DRIVER_NAME "dm1105" 161#define DRIVER_NAME "dm1105"
162#define DM1105_I2C_GPIO_NAME "dm1105-gpio"
160 163
161#define DM1105_DMA_PACKETS 47 164#define DM1105_DMA_PACKETS 47
162#define DM1105_DMA_PACKET_LENGTH (128*4) 165#define DM1105_DMA_PACKET_LENGTH (128*4)
163#define DM1105_DMA_BYTES (128 * 4 * DM1105_DMA_PACKETS) 166#define DM1105_DMA_BYTES (128 * 4 * DM1105_DMA_PACKETS)
164 167
 168/* GPIO bit masks */
169#define GPIO08 (1 << 8)
170#define GPIO13 (1 << 13)
171#define GPIO14 (1 << 14)
172#define GPIO15 (1 << 15)
173#define GPIO16 (1 << 16)
174#define GPIO17 (1 << 17)
175#define GPIO_ALL 0x03ffff
176
165/* GPIO's for LNB power control */ 177/* GPIO's for LNB power control */
166#define DM1105_LNB_MASK 0x00000000 178#define DM1105_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13))
167#define DM1105_LNB_OFF 0x00020000 179#define DM1105_LNB_OFF GPIO17
168#define DM1105_LNB_13V 0x00010100 180#define DM1105_LNB_13V (GPIO16 | GPIO08)
169#define DM1105_LNB_18V 0x00000100 181#define DM1105_LNB_18V GPIO08
170 182
171/* GPIO's for LNB power control for Axess DM05 */ 183/* GPIO's for LNB power control for Axess DM05 */
172#define DM05_LNB_MASK 0x00000000 184#define DM05_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13))
173#define DM05_LNB_OFF 0x00020000/* actually 13v */ 185#define DM05_LNB_OFF GPIO17/* actually 13v */
174#define DM05_LNB_13V 0x00020000 186#define DM05_LNB_13V GPIO17
175#define DM05_LNB_18V 0x00030000 187#define DM05_LNB_18V (GPIO17 | GPIO16)
188
189/* GPIO's for LNB power control for unbranded with I2C on GPIO */
190#define UNBR_LNB_MASK (GPIO17 | GPIO16)
191#define UNBR_LNB_OFF 0
192#define UNBR_LNB_13V GPIO17
193#define UNBR_LNB_18V (GPIO17 | GPIO16)
176 194
177static unsigned int card[] = {[0 ... 3] = UNSET }; 195static unsigned int card[] = {[0 ... 3] = UNSET };
178module_param_array(card, int, NULL, 0444); 196module_param_array(card, int, NULL, 0444);
@@ -187,7 +205,11 @@ static unsigned int dm1105_devcount;
187DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 205DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
188 206
189struct dm1105_board { 207struct dm1105_board {
190 char *name; 208 char *name;
209 struct {
210 u32 mask, off, v13, v18;
211 } lnb;
212 u32 gpio_scl, gpio_sda;
191}; 213};
192 214
193struct dm1105_subid { 215struct dm1105_subid {
@@ -199,15 +221,50 @@ struct dm1105_subid {
199static const struct dm1105_board dm1105_boards[] = { 221static const struct dm1105_board dm1105_boards[] = {
200 [DM1105_BOARD_UNKNOWN] = { 222 [DM1105_BOARD_UNKNOWN] = {
201 .name = "UNKNOWN/GENERIC", 223 .name = "UNKNOWN/GENERIC",
224 .lnb = {
225 .mask = DM1105_LNB_MASK,
226 .off = DM1105_LNB_OFF,
227 .v13 = DM1105_LNB_13V,
228 .v18 = DM1105_LNB_18V,
229 },
202 }, 230 },
203 [DM1105_BOARD_DVBWORLD_2002] = { 231 [DM1105_BOARD_DVBWORLD_2002] = {
204 .name = "DVBWorld PCI 2002", 232 .name = "DVBWorld PCI 2002",
233 .lnb = {
234 .mask = DM1105_LNB_MASK,
235 .off = DM1105_LNB_OFF,
236 .v13 = DM1105_LNB_13V,
237 .v18 = DM1105_LNB_18V,
238 },
205 }, 239 },
206 [DM1105_BOARD_DVBWORLD_2004] = { 240 [DM1105_BOARD_DVBWORLD_2004] = {
207 .name = "DVBWorld PCI 2004", 241 .name = "DVBWorld PCI 2004",
242 .lnb = {
243 .mask = DM1105_LNB_MASK,
244 .off = DM1105_LNB_OFF,
245 .v13 = DM1105_LNB_13V,
246 .v18 = DM1105_LNB_18V,
247 },
208 }, 248 },
209 [DM1105_BOARD_AXESS_DM05] = { 249 [DM1105_BOARD_AXESS_DM05] = {
210 .name = "Axess/EasyTv DM05", 250 .name = "Axess/EasyTv DM05",
251 .lnb = {
252 .mask = DM05_LNB_MASK,
253 .off = DM05_LNB_OFF,
254 .v13 = DM05_LNB_13V,
255 .v18 = DM05_LNB_18V,
256 },
257 },
258 [DM1105_BOARD_UNBRANDED_I2C_ON_GPIO] = {
259 .name = "Unbranded DM1105 with i2c on GPIOs",
260 .lnb = {
261 .mask = UNBR_LNB_MASK,
262 .off = UNBR_LNB_OFF,
263 .v13 = UNBR_LNB_13V,
264 .v18 = UNBR_LNB_18V,
265 },
266 .gpio_scl = GPIO14,
267 .gpio_sda = GPIO13,
211 }, 268 },
212}; 269};
213 270
@@ -293,6 +350,8 @@ struct dm1105_dev {
293 350
294 /* i2c */ 351 /* i2c */
295 struct i2c_adapter i2c_adap; 352 struct i2c_adapter i2c_adap;
353 struct i2c_adapter i2c_bb_adap;
354 struct i2c_algo_bit_data i2c_bit;
296 355
297 /* irq */ 356 /* irq */
298 struct work_struct work; 357 struct work_struct work;
@@ -328,6 +387,103 @@ struct dm1105_dev {
328#define dm_setl(reg, bit) dm_andorl((reg), (bit), (bit)) 387#define dm_setl(reg, bit) dm_andorl((reg), (bit), (bit))
329#define dm_clearl(reg, bit) dm_andorl((reg), (bit), 0) 388#define dm_clearl(reg, bit) dm_andorl((reg), (bit), 0)
330 389
 390/* The chip has 18 GPIOs. In HOST mode 15 of them are used as address lines,
 391 so only the 3 GPIOs from GPIO15 to GPIO17 remain available.
 392 Whether HOST mode is enabled is not checked here, as it is not implemented yet.
 393 */
394static void dm1105_gpio_set(struct dm1105_dev *dev, u32 mask)
395{
396 if (mask & 0xfffc0000)
397 printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
398
399 if (mask & 0x0003ffff)
400 dm_setl(DM1105_GPIOVAL, mask & 0x0003ffff);
401
402}
403
404static void dm1105_gpio_clear(struct dm1105_dev *dev, u32 mask)
405{
406 if (mask & 0xfffc0000)
407 printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
408
409 if (mask & 0x0003ffff)
410 dm_clearl(DM1105_GPIOVAL, mask & 0x0003ffff);
411
412}
413
414static void dm1105_gpio_andor(struct dm1105_dev *dev, u32 mask, u32 val)
415{
416 if (mask & 0xfffc0000)
417 printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
418
419 if (mask & 0x0003ffff)
420 dm_andorl(DM1105_GPIOVAL, mask & 0x0003ffff, val);
421
422}
423
424static u32 dm1105_gpio_get(struct dm1105_dev *dev, u32 mask)
425{
426 if (mask & 0xfffc0000)
427 printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
428
429 if (mask & 0x0003ffff)
430 return dm_readl(DM1105_GPIOVAL) & mask & 0x0003ffff;
431
432 return 0;
433}
434
435static void dm1105_gpio_enable(struct dm1105_dev *dev, u32 mask, int asoutput)
436{
437 if (mask & 0xfffc0000)
438 printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
439
440 if ((mask & 0x0003ffff) && asoutput)
441 dm_clearl(DM1105_GPIOCTR, mask & 0x0003ffff);
442 else if ((mask & 0x0003ffff) && !asoutput)
443 dm_setl(DM1105_GPIOCTR, mask & 0x0003ffff);
444
445}
446
447static void dm1105_setline(struct dm1105_dev *dev, u32 line, int state)
448{
449 if (state)
450 dm1105_gpio_enable(dev, line, 0);
451 else {
452 dm1105_gpio_enable(dev, line, 1);
453 dm1105_gpio_clear(dev, line);
454 }
455}
456
457static void dm1105_setsda(void *data, int state)
458{
459 struct dm1105_dev *dev = data;
460
461 dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_sda, state);
462}
463
464static void dm1105_setscl(void *data, int state)
465{
466 struct dm1105_dev *dev = data;
467
468 dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_scl, state);
469}
470
471static int dm1105_getsda(void *data)
472{
473 struct dm1105_dev *dev = data;
474
475 return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_sda)
476 ? 1 : 0;
477}
478
479static int dm1105_getscl(void *data)
480{
481 struct dm1105_dev *dev = data;
482
483 return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_scl)
484 ? 1 : 0;
485}
486
331static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap, 487static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap,
332 struct i2c_msg *msgs, int num) 488 struct i2c_msg *msgs, int num)
333{ 489{
@@ -436,31 +592,20 @@ static inline struct dm1105_dev *frontend_to_dm1105_dev(struct dvb_frontend *fe)
436static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) 592static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
437{ 593{
438 struct dm1105_dev *dev = frontend_to_dm1105_dev(fe); 594 struct dm1105_dev *dev = frontend_to_dm1105_dev(fe);
439 u32 lnb_mask, lnb_13v, lnb_18v, lnb_off;
440 595
441 switch (dev->boardnr) { 596 dm1105_gpio_enable(dev, dm1105_boards[dev->boardnr].lnb.mask, 1);
442 case DM1105_BOARD_AXESS_DM05:
443 lnb_mask = DM05_LNB_MASK;
444 lnb_off = DM05_LNB_OFF;
445 lnb_13v = DM05_LNB_13V;
446 lnb_18v = DM05_LNB_18V;
447 break;
448 case DM1105_BOARD_DVBWORLD_2002:
449 case DM1105_BOARD_DVBWORLD_2004:
450 default:
451 lnb_mask = DM1105_LNB_MASK;
452 lnb_off = DM1105_LNB_OFF;
453 lnb_13v = DM1105_LNB_13V;
454 lnb_18v = DM1105_LNB_18V;
455 }
456
457 dm_writel(DM1105_GPIOCTR, lnb_mask);
458 if (voltage == SEC_VOLTAGE_18) 597 if (voltage == SEC_VOLTAGE_18)
459 dm_writel(DM1105_GPIOVAL, lnb_18v); 598 dm1105_gpio_andor(dev,
599 dm1105_boards[dev->boardnr].lnb.mask,
600 dm1105_boards[dev->boardnr].lnb.v18);
460 else if (voltage == SEC_VOLTAGE_13) 601 else if (voltage == SEC_VOLTAGE_13)
461 dm_writel(DM1105_GPIOVAL, lnb_13v); 602 dm1105_gpio_andor(dev,
603 dm1105_boards[dev->boardnr].lnb.mask,
604 dm1105_boards[dev->boardnr].lnb.v13);
462 else 605 else
463 dm_writel(DM1105_GPIOVAL, lnb_off); 606 dm1105_gpio_andor(dev,
607 dm1105_boards[dev->boardnr].lnb.mask,
608 dm1105_boards[dev->boardnr].lnb.off);
464 609
465 return 0; 610 return 0;
466} 611}
@@ -708,6 +853,38 @@ static int __devinit frontend_init(struct dm1105_dev *dev)
708 int ret; 853 int ret;
709 854
710 switch (dev->boardnr) { 855 switch (dev->boardnr) {
856 case DM1105_BOARD_UNBRANDED_I2C_ON_GPIO:
857 dm1105_gpio_enable(dev, GPIO15, 1);
858 dm1105_gpio_clear(dev, GPIO15);
859 msleep(100);
860 dm1105_gpio_set(dev, GPIO15);
861 msleep(200);
862 dev->fe = dvb_attach(
863 stv0299_attach, &sharp_z0194a_config,
864 &dev->i2c_bb_adap);
865 if (dev->fe) {
866 dev->fe->ops.set_voltage = dm1105_set_voltage;
867 dvb_attach(dvb_pll_attach, dev->fe, 0x60,
868 &dev->i2c_bb_adap, DVB_PLL_OPERA1);
869 break;
870 }
871
872 dev->fe = dvb_attach(
873 stv0288_attach, &earda_config,
874 &dev->i2c_bb_adap);
875 if (dev->fe) {
876 dev->fe->ops.set_voltage = dm1105_set_voltage;
877 dvb_attach(stb6000_attach, dev->fe, 0x61,
878 &dev->i2c_bb_adap);
879 break;
880 }
881
882 dev->fe = dvb_attach(
883 si21xx_attach, &serit_config,
884 &dev->i2c_bb_adap);
885 if (dev->fe)
886 dev->fe->ops.set_voltage = dm1105_set_voltage;
887 break;
711 case DM1105_BOARD_DVBWORLD_2004: 888 case DM1105_BOARD_DVBWORLD_2004:
712 dev->fe = dvb_attach( 889 dev->fe = dvb_attach(
713 cx24116_attach, &serit_sp2633_config, 890 cx24116_attach, &serit_sp2633_config,
@@ -870,11 +1047,32 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
870 if (ret < 0) 1047 if (ret < 0)
871 goto err_dm1105_hw_exit; 1048 goto err_dm1105_hw_exit;
872 1049
1050 i2c_set_adapdata(&dev->i2c_bb_adap, dev);
1051 strcpy(dev->i2c_bb_adap.name, DM1105_I2C_GPIO_NAME);
1052 dev->i2c_bb_adap.owner = THIS_MODULE;
1053 dev->i2c_bb_adap.dev.parent = &pdev->dev;
1054 dev->i2c_bb_adap.algo_data = &dev->i2c_bit;
1055 dev->i2c_bit.data = dev;
1056 dev->i2c_bit.setsda = dm1105_setsda;
1057 dev->i2c_bit.setscl = dm1105_setscl;
1058 dev->i2c_bit.getsda = dm1105_getsda;
1059 dev->i2c_bit.getscl = dm1105_getscl;
1060 dev->i2c_bit.udelay = 10;
1061 dev->i2c_bit.timeout = 10;
1062
1063 /* Raise SCL and SDA */
1064 dm1105_setsda(dev, 1);
1065 dm1105_setscl(dev, 1);
1066
1067 ret = i2c_bit_add_bus(&dev->i2c_bb_adap);
1068 if (ret < 0)
1069 goto err_i2c_del_adapter;
1070
873 /* dvb */ 1071 /* dvb */
874 ret = dvb_register_adapter(&dev->dvb_adapter, DRIVER_NAME, 1072 ret = dvb_register_adapter(&dev->dvb_adapter, DRIVER_NAME,
875 THIS_MODULE, &pdev->dev, adapter_nr); 1073 THIS_MODULE, &pdev->dev, adapter_nr);
876 if (ret < 0) 1074 if (ret < 0)
877 goto err_i2c_del_adapter; 1075 goto err_i2c_del_adapters;
878 1076
879 dvb_adapter = &dev->dvb_adapter; 1077 dvb_adapter = &dev->dvb_adapter;
880 1078
@@ -952,6 +1150,8 @@ err_dvb_dmx_release:
952 dvb_dmx_release(dvbdemux); 1150 dvb_dmx_release(dvbdemux);
953err_dvb_unregister_adapter: 1151err_dvb_unregister_adapter:
954 dvb_unregister_adapter(dvb_adapter); 1152 dvb_unregister_adapter(dvb_adapter);
1153err_i2c_del_adapters:
1154 i2c_del_adapter(&dev->i2c_bb_adap);
955err_i2c_del_adapter: 1155err_i2c_del_adapter:
956 i2c_del_adapter(&dev->i2c_adap); 1156 i2c_del_adapter(&dev->i2c_adap);
957err_dm1105_hw_exit: 1157err_dm1105_hw_exit:
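
For orientation, the dm1105 changes above register a second, bit-banged I2C adapter on two GPIO lines via i2c-algo-bit. A condensed sketch of that registration pattern, with placeholder callbacks standing in for the GPIO accessors:

    #include <linux/module.h>
    #include <linux/string.h>
    #include <linux/i2c.h>
    #include <linux/i2c-algo-bit.h>

    struct demo_dev {
            struct i2c_adapter bb_adap;
            struct i2c_algo_bit_data bit;
    };

    /* placeholders: a real driver toggles/reads the SDA/SCL GPIOs here */
    static void demo_setsda(void *data, int state) { }
    static void demo_setscl(void *data, int state) { }
    static int  demo_getsda(void *data) { return 1; }
    static int  demo_getscl(void *data) { return 1; }

    static int demo_register_bb_i2c(struct demo_dev *dev, struct device *parent)
    {
            strcpy(dev->bb_adap.name, "demo-gpio-i2c");
            dev->bb_adap.owner = THIS_MODULE;
            dev->bb_adap.dev.parent = parent;
            dev->bb_adap.algo_data = &dev->bit;

            dev->bit.data = dev;
            dev->bit.setsda = demo_setsda;
            dev->bit.setscl = demo_setscl;
            dev->bit.getsda = demo_getsda;
            dev->bit.getscl = demo_getscl;
            dev->bit.udelay = 10;   /* bus speed */
            dev->bit.timeout = 10;  /* jiffies */

            return i2c_bit_add_bus(&dev->bb_adap);
    }

i2c-algo-bit supplies the master_xfer implementation, so the driver only has to provide the four line accessors plus the timing parameters.
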
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index f36f471deae2..37b146961ae2 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -207,17 +207,6 @@ static int lme2510_stream_restart(struct dvb_usb_device *d)
207 rbuff, sizeof(rbuff)); 207 rbuff, sizeof(rbuff));
208 return ret; 208 return ret;
209} 209}
210static int lme2510_remote_keypress(struct dvb_usb_adapter *adap, u32 keypress)
211{
212 struct dvb_usb_device *d = adap->dev;
213
214 deb_info(1, "INT Key Keypress =%04x", keypress);
215
216 if (keypress > 0)
217 rc_keydown(d->rc_dev, keypress, 0);
218
219 return 0;
220}
221 210
222static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out) 211static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out)
223{ 212{
@@ -256,6 +245,7 @@ static void lme2510_int_response(struct urb *lme_urb)
256 struct lme2510_state *st = adap->dev->priv; 245 struct lme2510_state *st = adap->dev->priv;
257 static u8 *ibuf, *rbuf; 246 static u8 *ibuf, *rbuf;
258 int i = 0, offset; 247 int i = 0, offset;
248 u32 key;
259 249
260 switch (lme_urb->status) { 250 switch (lme_urb->status) {
261 case 0: 251 case 0:
@@ -282,10 +272,16 @@ static void lme2510_int_response(struct urb *lme_urb)
282 272
283 switch (ibuf[0]) { 273 switch (ibuf[0]) {
284 case 0xaa: 274 case 0xaa:
285 debug_data_snipet(1, "INT Remote data snipet in", ibuf); 275 debug_data_snipet(1, "INT Remote data snipet", ibuf);
286 lme2510_remote_keypress(adap, 276 if ((ibuf[4] + ibuf[5]) == 0xff) {
287 (u32)(ibuf[2] << 24) + (ibuf[3] << 16) + 277 key = ibuf[5];
288 (ibuf[4] << 8) + ibuf[5]); 278 key += (ibuf[3] > 0)
279 ? (ibuf[3] ^ 0xff) << 8 : 0;
280 key += (ibuf[2] ^ 0xff) << 16;
281 deb_info(1, "INT Key =%08x", key);
282 if (adap->dev->rc_dev != NULL)
283 rc_keydown(adap->dev->rc_dev, key, 0);
284 }
289 break; 285 break;
290 case 0xbb: 286 case 0xbb:
291 switch (st->tuner_config) { 287 switch (st->tuner_config) {
@@ -691,45 +687,6 @@ static int lme2510_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
691 return (ret < 0) ? -ENODEV : 0; 687 return (ret < 0) ? -ENODEV : 0;
692} 688}
693 689
694static int lme2510_int_service(struct dvb_usb_adapter *adap)
695{
696 struct dvb_usb_device *d = adap->dev;
697 struct rc_dev *rc;
698 int ret;
699
700 info("STA Configuring Remote");
701
702 rc = rc_allocate_device();
703 if (!rc)
704 return -ENOMEM;
705
706 usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys));
707 strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys));
708
709 rc->input_name = "LME2510 Remote Control";
710 rc->input_phys = d->rc_phys;
711 rc->map_name = RC_MAP_LME2510;
712 rc->driver_name = "LME 2510";
713 usb_to_input_id(d->udev, &rc->input_id);
714
715 ret = rc_register_device(rc);
716 if (ret) {
717 rc_free_device(rc);
718 return ret;
719 }
720 d->rc_dev = rc;
721
722 /* Start the Interrupt */
723 ret = lme2510_int_read(adap);
724 if (ret < 0) {
725 rc_unregister_device(rc);
726 info("INT Unable to start Interrupt Service");
727 return -ENODEV;
728 }
729
730 return 0;
731}
732
733static u8 check_sum(u8 *p, u8 len) 690static u8 check_sum(u8 *p, u8 len)
734{ 691{
735 u8 sum = 0; 692 u8 sum = 0;
@@ -831,7 +788,7 @@ static int lme_firmware_switch(struct usb_device *udev, int cold)
831 788
832 cold_fw = !cold; 789 cold_fw = !cold;
833 790
834 if (udev->descriptor.idProduct == 0x1122) { 791 if (le16_to_cpu(udev->descriptor.idProduct) == 0x1122) {
835 switch (dvb_usb_lme2510_firmware) { 792 switch (dvb_usb_lme2510_firmware) {
836 default: 793 default:
837 dvb_usb_lme2510_firmware = TUNER_S0194; 794 dvb_usb_lme2510_firmware = TUNER_S0194;
@@ -1053,8 +1010,11 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
1053 1010
1054 1011
1055end: if (ret) { 1012end: if (ret) {
1056 kfree(adap->fe); 1013 if (adap->fe) {
1057 adap->fe = NULL; 1014 dvb_frontend_detach(adap->fe);
1015 adap->fe = NULL;
1016 }
1017 adap->dev->props.rc.core.rc_codes = NULL;
1058 return -ENODEV; 1018 return -ENODEV;
1059 } 1019 }
1060 1020
@@ -1097,8 +1057,12 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
1097 return -ENODEV; 1057 return -ENODEV;
1098 } 1058 }
1099 1059
1100 /* Start the Interrupt & Remote*/ 1060 /* Start the Interrupt*/
1101 ret = lme2510_int_service(adap); 1061 ret = lme2510_int_read(adap);
1062 if (ret < 0) {
1063 info("INT Unable to start Interrupt Service");
1064 return -ENODEV;
1065 }
1102 1066
1103 return ret; 1067 return ret;
1104} 1068}
@@ -1204,6 +1168,12 @@ static struct dvb_usb_device_properties lme2510_properties = {
1204 } 1168 }
1205 } 1169 }
1206 }, 1170 },
1171 .rc.core = {
1172 .protocol = RC_TYPE_NEC,
1173 .module_name = "LME2510 Remote Control",
1174 .allowed_protos = RC_TYPE_NEC,
1175 .rc_codes = RC_MAP_LME2510,
1176 },
1207 .power_ctrl = lme2510_powerup, 1177 .power_ctrl = lme2510_powerup,
1208 .identify_state = lme2510_identify_state, 1178 .identify_state = lme2510_identify_state,
1209 .i2c_algo = &lme2510_i2c_algo, 1179 .i2c_algo = &lme2510_i2c_algo,
@@ -1246,6 +1216,12 @@ static struct dvb_usb_device_properties lme2510c_properties = {
1246 } 1216 }
1247 } 1217 }
1248 }, 1218 },
1219 .rc.core = {
1220 .protocol = RC_TYPE_NEC,
1221 .module_name = "LME2510 Remote Control",
1222 .allowed_protos = RC_TYPE_NEC,
1223 .rc_codes = RC_MAP_LME2510,
1224 },
1249 .power_ctrl = lme2510_powerup, 1225 .power_ctrl = lme2510_powerup,
1250 .identify_state = lme2510_identify_state, 1226 .identify_state = lme2510_identify_state,
1251 .i2c_algo = &lme2510_i2c_algo, 1227 .i2c_algo = &lme2510_i2c_algo,
@@ -1269,19 +1245,21 @@ static void *lme2510_exit_int(struct dvb_usb_device *d)
1269 adap->feedcount = 0; 1245 adap->feedcount = 0;
1270 } 1246 }
1271 1247
1272 if (st->lme_urb != NULL) { 1248 if (st->usb_buffer != NULL) {
1273 st->i2c_talk_onoff = 1; 1249 st->i2c_talk_onoff = 1;
1274 st->signal_lock = 0; 1250 st->signal_lock = 0;
1275 st->signal_level = 0; 1251 st->signal_level = 0;
1276 st->signal_sn = 0; 1252 st->signal_sn = 0;
1277 buffer = st->usb_buffer; 1253 buffer = st->usb_buffer;
1254 }
1255
1256 if (st->lme_urb != NULL) {
1278 usb_kill_urb(st->lme_urb); 1257 usb_kill_urb(st->lme_urb);
1279 usb_free_coherent(d->udev, 5000, st->buffer, 1258 usb_free_coherent(d->udev, 5000, st->buffer,
1280 st->lme_urb->transfer_dma); 1259 st->lme_urb->transfer_dma);
1281 info("Interrupt Service Stopped"); 1260 info("Interrupt Service Stopped");
1282 rc_unregister_device(d->rc_dev);
1283 info("Remote Stopped");
1284 } 1261 }
1262
1285 return buffer; 1263 return buffer;
1286} 1264}
1287 1265
@@ -1293,7 +1271,8 @@ static void lme2510_exit(struct usb_interface *intf)
1293 if (d != NULL) { 1271 if (d != NULL) {
1294 usb_buffer = lme2510_exit_int(d); 1272 usb_buffer = lme2510_exit_int(d);
1295 dvb_usb_device_exit(intf); 1273 dvb_usb_device_exit(intf);
1296 kfree(usb_buffer); 1274 if (usb_buffer != NULL)
1275 kfree(usb_buffer);
1297 } 1276 }
1298} 1277}
1299 1278
@@ -1327,5 +1306,5 @@ module_exit(lme2510_module_exit);
1327 1306
1328MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); 1307MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
1329MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); 1308MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
1330MODULE_VERSION("1.86"); 1309MODULE_VERSION("1.88");
1331MODULE_LICENSE("GPL"); 1310MODULE_LICENSE("GPL");
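
A note on the remote-key change above: the (ibuf[4] + ibuf[5]) == 0xff test is the usual NEC validity check, since the command byte is transmitted together with its complement. A small stand-alone sketch of the same check and scancode assembly, using an invented frame:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical frame bytes as the interrupt handler sees them:
     * buf[2]/buf[3] carry the address, buf[4] the command, buf[5] its complement */
    static int nec_scancode(const uint8_t *buf, uint32_t *scancode)
    {
            if (buf[4] + buf[5] != 0xff)
                    return -1;              /* command/complement mismatch: reject */

            *scancode = buf[5];
            if (buf[3] > 0)
                    *scancode |= (uint32_t)(buf[3] ^ 0xff) << 8;
            *scancode |= (uint32_t)(buf[2] ^ 0xff) << 16;
            return 0;
    }

    int main(void)
    {
            const uint8_t frame[6] = { 0xaa, 0x00, 0xff, 0x00, 0xf7, 0x08 };
            uint32_t key;

            if (nec_scancode(frame, &key) == 0)
                    printf("key = %08x\n", key);    /* prints key = 00000008 */
            return 0;
    }
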
diff --git a/drivers/media/dvb/frontends/stb0899_algo.c b/drivers/media/dvb/frontends/stb0899_algo.c
index 2da55ec20392..d70eee00f33a 100644
--- a/drivers/media/dvb/frontends/stb0899_algo.c
+++ b/drivers/media/dvb/frontends/stb0899_algo.c
@@ -23,7 +23,7 @@
23#include "stb0899_priv.h" 23#include "stb0899_priv.h"
24#include "stb0899_reg.h" 24#include "stb0899_reg.h"
25 25
26inline u32 stb0899_do_div(u64 n, u32 d) 26static inline u32 stb0899_do_div(u64 n, u32 d)
27{ 27{
28 /* wrap do_div() for ease of use */ 28 /* wrap do_div() for ease of use */
29 29
diff --git a/drivers/media/dvb/frontends/tda8261.c b/drivers/media/dvb/frontends/tda8261.c
index 1742056a34e8..53c7d8f1df28 100644
--- a/drivers/media/dvb/frontends/tda8261.c
+++ b/drivers/media/dvb/frontends/tda8261.c
@@ -224,7 +224,6 @@ exit:
224} 224}
225 225
226EXPORT_SYMBOL(tda8261_attach); 226EXPORT_SYMBOL(tda8261_attach);
227MODULE_PARM_DESC(verbose, "Set verbosity level");
228 227
229MODULE_AUTHOR("Manu Abraham"); 228MODULE_AUTHOR("Manu Abraham");
230MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner"); 229MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 5c2a9058c09f..e83e84003025 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -412,8 +412,7 @@ static int __devinit maxiradio_init_one(struct pci_dev *pdev, const struct pci_d
412 goto err_out_free_region; 412 goto err_out_free_region;
413 } 413 }
414 414
415 v4l2_info(v4l2_dev, "version " DRIVER_VERSION 415 v4l2_info(v4l2_dev, "version " DRIVER_VERSION "\n");
416 " time " __TIME__ " " __DATE__ "\n");
417 416
418 v4l2_info(v4l2_dev, "found Guillemot MAXI Radio device (io = 0x%x)\n", 417 v4l2_info(v4l2_dev, "found Guillemot MAXI Radio device (io = 0x%x)\n",
419 dev->io); 418 dev->io);
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 46cacf845049..459f7272d326 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1382,7 +1382,7 @@ static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
1382 1382
1383 switch (ctrl->id) { 1383 switch (ctrl->id) {
1384 case V4L2_CID_TUNE_ANTENNA_CAPACITOR: 1384 case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
1385 ctrl->val = wl1273_fm_get_tx_ctune(radio); 1385 ctrl->cur.val = wl1273_fm_get_tx_ctune(radio);
1386 break; 1386 break;
1387 1387
1388 default: 1388 default:
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index d50e5ac75ab6..87010724f914 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -191,7 +191,7 @@ static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
191 191
192 switch (ctrl->id) { 192 switch (ctrl->id) {
193 case V4L2_CID_TUNE_ANTENNA_CAPACITOR: 193 case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
194 ctrl->val = fm_tx_get_tune_cap_val(fmdev); 194 ctrl->cur.val = fm_tx_get_tune_cap_val(fmdev);
195 break; 195 break;
196 default: 196 default:
197 fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id); 197 fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 154c337f00fd..7d4bbc226d06 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -148,6 +148,18 @@ config IR_ITE_CIR
148 To compile this driver as a module, choose M here: the 148 To compile this driver as a module, choose M here: the
149 module will be called ite-cir. 149 module will be called ite-cir.
150 150
151config IR_FINTEK
152 tristate "Fintek Consumer Infrared Transceiver"
153 depends on PNP
154 depends on RC_CORE
155 ---help---
 156	  Say Y here to enable support for the integrated infrared
 157	  receiver/transceiver made by Fintek. This chip is found on assorted
158 Jetway motherboards (and of course, possibly others).
159
160 To compile this driver as a module, choose M here: the
161 module will be called fintek-cir.
162
151config IR_NUVOTON 163config IR_NUVOTON
152 tristate "Nuvoton w836x7hg Consumer Infrared Transceiver" 164 tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
153 depends on PNP 165 depends on PNP
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 1f90a219a162..52830e5f4eaa 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
16obj-$(CONFIG_IR_IMON) += imon.o 16obj-$(CONFIG_IR_IMON) += imon.o
17obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o 17obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o
18obj-$(CONFIG_IR_MCEUSB) += mceusb.o 18obj-$(CONFIG_IR_MCEUSB) += mceusb.o
19obj-$(CONFIG_IR_FINTEK) += fintek-cir.o
19obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o 20obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
20obj-$(CONFIG_IR_ENE) += ene_ir.o 21obj-$(CONFIG_IR_ENE) += ene_ir.o
21obj-$(CONFIG_IR_REDRAT3) += redrat3.o 22obj-$(CONFIG_IR_REDRAT3) += redrat3.o
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
new file mode 100644
index 000000000000..8fa539dde1b4
--- /dev/null
+++ b/drivers/media/rc/fintek-cir.c
@@ -0,0 +1,684 @@
1/*
2 * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
3 *
4 * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com>
5 *
6 * Special thanks to Fintek for providing hardware and spec sheets.
7 * This driver is based upon the nuvoton, ite and ene drivers for
8 * similar hardware.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
23 * USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pnp.h>
29#include <linux/io.h>
30#include <linux/interrupt.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <media/rc-core.h>
34#include <linux/pci_ids.h>
35
36#include "fintek-cir.h"
37
38/* write val to config reg */
39static inline void fintek_cr_write(struct fintek_dev *fintek, u8 val, u8 reg)
40{
41 fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)",
42 __func__, reg, val, fintek->cr_ip, fintek->cr_dp);
43 outb(reg, fintek->cr_ip);
44 outb(val, fintek->cr_dp);
45}
46
47/* read val from config reg */
48static inline u8 fintek_cr_read(struct fintek_dev *fintek, u8 reg)
49{
50 u8 val;
51
52 outb(reg, fintek->cr_ip);
53 val = inb(fintek->cr_dp);
54
55 fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)",
56 __func__, reg, val, fintek->cr_ip, fintek->cr_dp);
57 return val;
58}
59
60/* update config register bit without changing other bits */
61static inline void fintek_set_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
62{
63 u8 tmp = fintek_cr_read(fintek, reg) | val;
64 fintek_cr_write(fintek, tmp, reg);
65}
66
67/* clear config register bit without changing other bits */
68static inline void fintek_clear_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
69{
70 u8 tmp = fintek_cr_read(fintek, reg) & ~val;
71 fintek_cr_write(fintek, tmp, reg);
72}
73
74/* enter config mode */
75static inline void fintek_config_mode_enable(struct fintek_dev *fintek)
76{
77 /* Enabling Config Mode explicitly requires writing 2x */
78 outb(CONFIG_REG_ENABLE, fintek->cr_ip);
79 outb(CONFIG_REG_ENABLE, fintek->cr_ip);
80}
81
82/* exit config mode */
83static inline void fintek_config_mode_disable(struct fintek_dev *fintek)
84{
85 outb(CONFIG_REG_DISABLE, fintek->cr_ip);
86}
87
88/*
89 * When you want to address a specific logical device, write its logical
90 * device number to GCR_LOGICAL_DEV_NO
91 */
92static inline void fintek_select_logical_dev(struct fintek_dev *fintek, u8 ldev)
93{
94 fintek_cr_write(fintek, ldev, GCR_LOGICAL_DEV_NO);
95}
96
97/* write val to cir config register */
98static inline void fintek_cir_reg_write(struct fintek_dev *fintek, u8 val, u8 offset)
99{
100 outb(val, fintek->cir_addr + offset);
101}
102
103/* read val from cir config register */
104static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset)
105{
106 u8 val;
107
108 val = inb(fintek->cir_addr + offset);
109
110 return val;
111}
112
113#define pr_reg(text, ...) \
114 printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
115
116/* dump current cir register contents */
117static void cir_dump_regs(struct fintek_dev *fintek)
118{
119 fintek_config_mode_enable(fintek);
120 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
121
122 pr_reg("%s: Dump CIR logical device registers:\n", FINTEK_DRIVER_NAME);
123 pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
124 (fintek_cr_read(fintek, CIR_CR_BASE_ADDR_HI) << 8) |
125 fintek_cr_read(fintek, CIR_CR_BASE_ADDR_LO));
126 pr_reg(" * CR CIR IRQ NUM: 0x%x\n",
127 fintek_cr_read(fintek, CIR_CR_IRQ_SEL));
128
129 fintek_config_mode_disable(fintek);
130
131 pr_reg("%s: Dump CIR registers:\n", FINTEK_DRIVER_NAME);
132 pr_reg(" * STATUS: 0x%x\n", fintek_cir_reg_read(fintek, CIR_STATUS));
133 pr_reg(" * CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_CONTROL));
134 pr_reg(" * RX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_RX_DATA));
135 pr_reg(" * TX_CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_CONTROL));
136 pr_reg(" * TX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_DATA));
137}
138
139/* detect hardware features */
140static int fintek_hw_detect(struct fintek_dev *fintek)
141{
142 unsigned long flags;
143 u8 chip_major, chip_minor;
144 u8 vendor_major, vendor_minor;
145 u8 portsel, ir_class;
146 u16 vendor;
147 int ret = 0;
148
149 fintek_config_mode_enable(fintek);
150
151 /* Check if we're using config port 0x4e or 0x2e */
152 portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
153 if (portsel == 0xff) {
154 fit_pr(KERN_INFO, "first portsel read was bunk, trying alt");
155 fintek_config_mode_disable(fintek);
156 fintek->cr_ip = CR_INDEX_PORT2;
157 fintek->cr_dp = CR_DATA_PORT2;
158 fintek_config_mode_enable(fintek);
159 portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
160 }
161 fit_dbg("portsel reg: 0x%02x", portsel);
162
163 ir_class = fintek_cir_reg_read(fintek, CIR_CR_CLASS);
164 fit_dbg("ir_class reg: 0x%02x", ir_class);
165
166 switch (ir_class) {
167 case CLASS_RX_2TX:
168 case CLASS_RX_1TX:
169 fintek->hw_tx_capable = true;
170 break;
171 case CLASS_RX_ONLY:
172 default:
173 fintek->hw_tx_capable = false;
174 break;
175 }
176
177 chip_major = fintek_cr_read(fintek, GCR_CHIP_ID_HI);
178 chip_minor = fintek_cr_read(fintek, GCR_CHIP_ID_LO);
179
180 vendor_major = fintek_cr_read(fintek, GCR_VENDOR_ID_HI);
181 vendor_minor = fintek_cr_read(fintek, GCR_VENDOR_ID_LO);
182 vendor = vendor_major << 8 | vendor_minor;
183
184 if (vendor != VENDOR_ID_FINTEK)
185 fit_pr(KERN_WARNING, "Unknown vendor ID: 0x%04x", vendor);
186 else
187 fit_dbg("Read Fintek vendor ID from chip");
188
189 fintek_config_mode_disable(fintek);
190
191 spin_lock_irqsave(&fintek->fintek_lock, flags);
192 fintek->chip_major = chip_major;
193 fintek->chip_minor = chip_minor;
194 fintek->chip_vendor = vendor;
195 spin_unlock_irqrestore(&fintek->fintek_lock, flags);
196
197 return ret;
198}
199
200static void fintek_cir_ldev_init(struct fintek_dev *fintek)
201{
202 /* Select CIR logical device and enable */
203 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
204 fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
205
206 /* Write allocated CIR address and IRQ information to hardware */
207 fintek_cr_write(fintek, fintek->cir_addr >> 8, CIR_CR_BASE_ADDR_HI);
208 fintek_cr_write(fintek, fintek->cir_addr & 0xff, CIR_CR_BASE_ADDR_LO);
209
210 fintek_cr_write(fintek, fintek->cir_irq, CIR_CR_IRQ_SEL);
211
212 fit_dbg("CIR initialized, base io address: 0x%lx, irq: %d (len: %d)",
213 fintek->cir_addr, fintek->cir_irq, fintek->cir_port_len);
214}
215
216/* enable CIR interrupts */
217static void fintek_enable_cir_irq(struct fintek_dev *fintek)
218{
219 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
220}
221
222static void fintek_cir_regs_init(struct fintek_dev *fintek)
223{
224 /* clear any and all stray interrupts */
225 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
226
227 /* and finally, enable interrupts */
228 fintek_enable_cir_irq(fintek);
229}
230
231static void fintek_enable_wake(struct fintek_dev *fintek)
232{
233 fintek_config_mode_enable(fintek);
234 fintek_select_logical_dev(fintek, LOGICAL_DEV_ACPI);
235
236 /* Allow CIR PME's to wake system */
237 fintek_set_reg_bit(fintek, ACPI_WAKE_EN_CIR_BIT, LDEV_ACPI_WAKE_EN_REG);
238 /* Enable CIR PME's */
239 fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_EN_REG);
240 /* Clear CIR PME status register */
241 fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_CLR_REG);
242 /* Save state */
243 fintek_set_reg_bit(fintek, ACPI_STATE_CIR_BIT, LDEV_ACPI_STATE_REG);
244
245 fintek_config_mode_disable(fintek);
246}
247
248static int fintek_cmdsize(u8 cmd, u8 subcmd)
249{
250 int datasize = 0;
251
252 switch (cmd) {
253 case BUF_COMMAND_NULL:
254 if (subcmd == BUF_HW_CMD_HEADER)
255 datasize = 1;
256 break;
257 case BUF_HW_CMD_HEADER:
258 if (subcmd == BUF_CMD_G_REVISION)
259 datasize = 2;
260 break;
261 case BUF_COMMAND_HEADER:
262 switch (subcmd) {
263 case BUF_CMD_S_CARRIER:
264 case BUF_CMD_S_TIMEOUT:
265 case BUF_RSP_PULSE_COUNT:
266 datasize = 2;
267 break;
268 case BUF_CMD_SIG_END:
269 case BUF_CMD_S_TXMASK:
270 case BUF_CMD_S_RXSENSOR:
271 datasize = 1;
272 break;
273 }
274 }
275
276 return datasize;
277}
278
279/* process ir data stored in driver buffer */
280static void fintek_process_rx_ir_data(struct fintek_dev *fintek)
281{
282 DEFINE_IR_RAW_EVENT(rawir);
283 u8 sample;
284 int i;
285
286 for (i = 0; i < fintek->pkts; i++) {
287 sample = fintek->buf[i];
288 switch (fintek->parser_state) {
289 case CMD_HEADER:
290 fintek->cmd = sample;
291 if ((fintek->cmd == BUF_COMMAND_HEADER) ||
292 ((fintek->cmd & BUF_COMMAND_MASK) !=
293 BUF_PULSE_BIT)) {
294 fintek->parser_state = SUBCMD;
295 continue;
296 }
297 fintek->rem = (fintek->cmd & BUF_LEN_MASK);
298 fit_dbg("%s: rem: 0x%02x", __func__, fintek->rem);
299 if (fintek->rem)
300 fintek->parser_state = PARSE_IRDATA;
301 else
302 ir_raw_event_reset(fintek->rdev);
303 break;
304 case SUBCMD:
305 fintek->rem = fintek_cmdsize(fintek->cmd, sample);
306 fintek->parser_state = CMD_DATA;
307 break;
308 case CMD_DATA:
309 fintek->rem--;
310 break;
311 case PARSE_IRDATA:
312 fintek->rem--;
313 init_ir_raw_event(&rawir);
314 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
315 rawir.duration = US_TO_NS((sample & BUF_SAMPLE_MASK)
316 * CIR_SAMPLE_PERIOD);
317
318 fit_dbg("Storing %s with duration %d",
319 rawir.pulse ? "pulse" : "space",
320 rawir.duration);
321 ir_raw_event_store_with_filter(fintek->rdev, &rawir);
322 break;
323 }
324
325 if ((fintek->parser_state != CMD_HEADER) && !fintek->rem)
326 fintek->parser_state = CMD_HEADER;
327 }
328
329 fintek->pkts = 0;
330
331 fit_dbg("Calling ir_raw_event_handle");
332 ir_raw_event_handle(fintek->rdev);
333}
334
335/* copy data from hardware rx register into driver buffer */
336static void fintek_get_rx_ir_data(struct fintek_dev *fintek, u8 rx_irqs)
337{
338 unsigned long flags;
339 u8 sample, status;
340
341 spin_lock_irqsave(&fintek->fintek_lock, flags);
342
343 /*
344 * We must read data from CIR_RX_DATA until the hardware IR buffer
345 * is empty and clears the RX_TIMEOUT and/or RX_RECEIVE flags in
346 * the CIR_STATUS register
347 */
348 do {
349 sample = fintek_cir_reg_read(fintek, CIR_RX_DATA);
350 fit_dbg("%s: sample: 0x%02x", __func__, sample);
351
352 fintek->buf[fintek->pkts] = sample;
353 fintek->pkts++;
354
355 status = fintek_cir_reg_read(fintek, CIR_STATUS);
356 if (!(status & CIR_STATUS_IRQ_EN))
357 break;
358 } while (status & rx_irqs);
359
360 fintek_process_rx_ir_data(fintek);
361
362 spin_unlock_irqrestore(&fintek->fintek_lock, flags);
363}
364
365static void fintek_cir_log_irqs(u8 status)
366{
367 fit_pr(KERN_INFO, "IRQ 0x%02x:%s%s%s%s%s", status,
368 status & CIR_STATUS_IRQ_EN ? " IRQEN" : "",
369 status & CIR_STATUS_TX_FINISH ? " TXF" : "",
370 status & CIR_STATUS_TX_UNDERRUN ? " TXU" : "",
371 status & CIR_STATUS_RX_TIMEOUT ? " RXTO" : "",
372 status & CIR_STATUS_RX_RECEIVE ? " RXOK" : "");
373}
374
375/* interrupt service routine for incoming and outgoing CIR data */
376static irqreturn_t fintek_cir_isr(int irq, void *data)
377{
378 struct fintek_dev *fintek = data;
379 u8 status, rx_irqs;
380
381 fit_dbg_verbose("%s firing", __func__);
382
383 fintek_config_mode_enable(fintek);
384 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
385 fintek_config_mode_disable(fintek);
386
387 /*
388 * Get IR Status register contents. Write 1 to ack/clear
389 *
390 * bit: reg name - description
391 * 3: TX_FINISH - TX is finished
392 * 2: TX_UNDERRUN - TX underrun
393 * 1: RX_TIMEOUT - RX data timeout
394 * 0: RX_RECEIVE - RX data received
395 */
396 status = fintek_cir_reg_read(fintek, CIR_STATUS);
397 if (!(status & CIR_STATUS_IRQ_MASK) || status == 0xff) {
398 fit_dbg_verbose("%s exiting, IRSTS 0x%02x", __func__, status);
399 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
400 return IRQ_RETVAL(IRQ_NONE);
401 }
402
403 if (debug)
404 fintek_cir_log_irqs(status);
405
406 rx_irqs = status & (CIR_STATUS_RX_RECEIVE | CIR_STATUS_RX_TIMEOUT);
407 if (rx_irqs)
408 fintek_get_rx_ir_data(fintek, rx_irqs);
409
410 /* ack/clear all irq flags we've got */
411 fintek_cir_reg_write(fintek, status, CIR_STATUS);
412
413 fit_dbg_verbose("%s done", __func__);
414 return IRQ_RETVAL(IRQ_HANDLED);
415}
416
417static void fintek_enable_cir(struct fintek_dev *fintek)
418{
419 /* set IRQ enabled */
420 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
421
422 fintek_config_mode_enable(fintek);
423
424 /* enable the CIR logical device */
425 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
426 fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
427
428 fintek_config_mode_disable(fintek);
429
430 /* clear all pending interrupts */
431 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
432
433 /* enable interrupts */
434 fintek_enable_cir_irq(fintek);
435}
436
437static void fintek_disable_cir(struct fintek_dev *fintek)
438{
439 fintek_config_mode_enable(fintek);
440
441 /* disable the CIR logical device */
442 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
443 fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
444
445 fintek_config_mode_disable(fintek);
446}
447
448static int fintek_open(struct rc_dev *dev)
449{
450 struct fintek_dev *fintek = dev->priv;
451 unsigned long flags;
452
453 spin_lock_irqsave(&fintek->fintek_lock, flags);
454 fintek_enable_cir(fintek);
455 spin_unlock_irqrestore(&fintek->fintek_lock, flags);
456
457 return 0;
458}
459
460static void fintek_close(struct rc_dev *dev)
461{
462 struct fintek_dev *fintek = dev->priv;
463 unsigned long flags;
464
465 spin_lock_irqsave(&fintek->fintek_lock, flags);
466 fintek_disable_cir(fintek);
467 spin_unlock_irqrestore(&fintek->fintek_lock, flags);
468}
469
470/* Allocate memory, probe hardware, and initialize everything */
471static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
472{
473 struct fintek_dev *fintek;
474 struct rc_dev *rdev;
475 int ret = -ENOMEM;
476
477 fintek = kzalloc(sizeof(struct fintek_dev), GFP_KERNEL);
478 if (!fintek)
479 return ret;
480
481 /* input device for IR remote (and tx) */
482 rdev = rc_allocate_device();
483 if (!rdev)
484 goto failure;
485
486 ret = -ENODEV;
487 /* validate pnp resources */
488 if (!pnp_port_valid(pdev, 0)) {
489 dev_err(&pdev->dev, "IR PNP Port not valid!\n");
490 goto failure;
491 }
492
493 if (!pnp_irq_valid(pdev, 0)) {
494 dev_err(&pdev->dev, "IR PNP IRQ not valid!\n");
495 goto failure;
496 }
497
498 fintek->cir_addr = pnp_port_start(pdev, 0);
499 fintek->cir_irq = pnp_irq(pdev, 0);
500 fintek->cir_port_len = pnp_port_len(pdev, 0);
501
502 fintek->cr_ip = CR_INDEX_PORT;
503 fintek->cr_dp = CR_DATA_PORT;
504
505 spin_lock_init(&fintek->fintek_lock);
506
507 ret = -EBUSY;
508 /* now claim resources */
509 if (!request_region(fintek->cir_addr,
510 fintek->cir_port_len, FINTEK_DRIVER_NAME))
511 goto failure;
512
513 if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
514 FINTEK_DRIVER_NAME, (void *)fintek))
515 goto failure;
516
517 pnp_set_drvdata(pdev, fintek);
518 fintek->pdev = pdev;
519
520 ret = fintek_hw_detect(fintek);
521 if (ret)
522 goto failure;
523
524 /* Initialize CIR & CIR Wake Logical Devices */
525 fintek_config_mode_enable(fintek);
526 fintek_cir_ldev_init(fintek);
527 fintek_config_mode_disable(fintek);
528
529 /* Initialize CIR & CIR Wake Config Registers */
530 fintek_cir_regs_init(fintek);
531
532 /* Set up the rc device */
533 rdev->priv = fintek;
534 rdev->driver_type = RC_DRIVER_IR_RAW;
535 rdev->allowed_protos = RC_TYPE_ALL;
536 rdev->open = fintek_open;
537 rdev->close = fintek_close;
538 rdev->input_name = FINTEK_DESCRIPTION;
539 rdev->input_phys = "fintek/cir0";
540 rdev->input_id.bustype = BUS_HOST;
541 rdev->input_id.vendor = VENDOR_ID_FINTEK;
542 rdev->input_id.product = fintek->chip_major;
543 rdev->input_id.version = fintek->chip_minor;
544 rdev->dev.parent = &pdev->dev;
545 rdev->driver_name = FINTEK_DRIVER_NAME;
546 rdev->map_name = RC_MAP_RC6_MCE;
547 rdev->timeout = US_TO_NS(1000);
548 /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
549 rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
550
551 ret = rc_register_device(rdev);
552 if (ret)
553 goto failure;
554
555 device_init_wakeup(&pdev->dev, true);
556 fintek->rdev = rdev;
557 fit_pr(KERN_NOTICE, "driver has been successfully loaded\n");
558 if (debug)
559 cir_dump_regs(fintek);
560
561 return 0;
562
563failure:
564 if (fintek->cir_irq)
565 free_irq(fintek->cir_irq, fintek);
566 if (fintek->cir_addr)
567 release_region(fintek->cir_addr, fintek->cir_port_len);
568
569 rc_free_device(rdev);
570 kfree(fintek);
571
572 return ret;
573}
574
575static void __devexit fintek_remove(struct pnp_dev *pdev)
576{
577 struct fintek_dev *fintek = pnp_get_drvdata(pdev);
578 unsigned long flags;
579
580 spin_lock_irqsave(&fintek->fintek_lock, flags);
581 /* disable CIR */
582 fintek_disable_cir(fintek);
583 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
584 /* enable CIR Wake (for IR power-on) */
585 fintek_enable_wake(fintek);
586 spin_unlock_irqrestore(&fintek->fintek_lock, flags);
587
588 /* free resources */
589 free_irq(fintek->cir_irq, fintek);
590 release_region(fintek->cir_addr, fintek->cir_port_len);
591
592 rc_unregister_device(fintek->rdev);
593
594 kfree(fintek);
595}
596
597static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
598{
599 struct fintek_dev *fintek = pnp_get_drvdata(pdev);
600
601 fit_dbg("%s called", __func__);
602
603 /* disable all CIR interrupts */
604 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
605
606 fintek_config_mode_enable(fintek);
607
608 /* disable cir logical dev */
609 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
610 fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
611
612 fintek_config_mode_disable(fintek);
613
614 /* make sure wake is enabled */
615 fintek_enable_wake(fintek);
616
617 return 0;
618}
619
620static int fintek_resume(struct pnp_dev *pdev)
621{
622 int ret = 0;
623 struct fintek_dev *fintek = pnp_get_drvdata(pdev);
624
625 fit_dbg("%s called", __func__);
626
627 /* open interrupt */
628 fintek_enable_cir_irq(fintek);
629
630 /* Enable CIR logical device */
631 fintek_config_mode_enable(fintek);
632 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
633 fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
634
635 fintek_config_mode_disable(fintek);
636
637 fintek_cir_regs_init(fintek);
638
639 return ret;
640}
641
642static void fintek_shutdown(struct pnp_dev *pdev)
643{
644 struct fintek_dev *fintek = pnp_get_drvdata(pdev);
645 fintek_enable_wake(fintek);
646}
647
648static const struct pnp_device_id fintek_ids[] = {
649 { "FIT0002", 0 }, /* CIR */
650 { "", 0 },
651};
652
653static struct pnp_driver fintek_driver = {
654 .name = FINTEK_DRIVER_NAME,
655 .id_table = fintek_ids,
656 .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
657 .probe = fintek_probe,
658 .remove = __devexit_p(fintek_remove),
659 .suspend = fintek_suspend,
660 .resume = fintek_resume,
661 .shutdown = fintek_shutdown,
662};
663
664int fintek_init(void)
665{
666 return pnp_register_driver(&fintek_driver);
667}
668
669void fintek_exit(void)
670{
671 pnp_unregister_driver(&fintek_driver);
672}
673
674module_param(debug, int, S_IRUGO | S_IWUSR);
675MODULE_PARM_DESC(debug, "Enable debugging output");
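/*
 * Descriptive note on the accepted levels: per the fit_dbg(),
 * fit_dbg_verbose() and fit_dbg_wake() macros in fintek-cir.h, debug
 * values of 1, 2 and 3 progressively enable basic, verbose and
 * wake-related debug output.
 */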
676
677MODULE_DEVICE_TABLE(pnp, fintek_ids);
678MODULE_DESCRIPTION(FINTEK_DESCRIPTION " driver");
679
680MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
681MODULE_LICENSE("GPL");
682
683module_init(fintek_init);
684module_exit(fintek_exit);
diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h
new file mode 100644
index 000000000000..1b10b2011f5e
--- /dev/null
+++ b/drivers/media/rc/fintek-cir.h
@@ -0,0 +1,243 @@
1/*
2 * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
3 *
4 * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com>
5 *
6 * Special thanks to Fintek for providing hardware and spec sheets.
7 * This driver is based upon the nuvoton, ite and ene drivers for
8 * similar hardware.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
23 * USA
24 */
25
26#include <linux/spinlock.h>
27#include <linux/ioctl.h>
28
29/* PNP driver name to register */
30#define FINTEK_DRIVER_NAME "fintek-cir"
31#define FINTEK_DESCRIPTION "Fintek LPC SuperIO Consumer IR Transceiver"
32#define VENDOR_ID_FINTEK 0x1934
33
34
35/* debugging module parameter */
36static int debug;
37
38#define fit_pr(level, text, ...) \
39 printk(level KBUILD_MODNAME ": " text, ## __VA_ARGS__)
40
41#define fit_dbg(text, ...) \
42 do { if (debug) \
43 printk(KERN_DEBUG \
44 KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__); } while (0)
45
46#define fit_dbg_verbose(text, ...) \
47 do { if (debug > 1) \
48 printk(KERN_DEBUG \
49 KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__); } while (0)
50
51#define fit_dbg_wake(text, ...) \
52 do { if (debug > 2) \
53 printk(KERN_DEBUG \
54 KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__); } while (0)
55
56
57#define TX_BUF_LEN 256
58#define RX_BUF_LEN 32
59
60struct fintek_dev {
61 struct pnp_dev *pdev;
62 struct rc_dev *rdev;
63
64 spinlock_t fintek_lock;
65
66 /* for rx */
67 u8 buf[RX_BUF_LEN];
68 unsigned int pkts;
69
70 struct {
71 spinlock_t lock;
72 u8 buf[TX_BUF_LEN];
73 unsigned int buf_count;
74 unsigned int cur_buf_num;
75 wait_queue_head_t queue;
76 } tx;
77
78 /* Config register index/data port pair */
79 u8 cr_ip;
80 u8 cr_dp;
81
82 /* hardware I/O settings */
83 unsigned long cir_addr;
84 int cir_irq;
85 int cir_port_len;
86
87 /* hardware id */
88 u8 chip_major;
89 u8 chip_minor;
90 u16 chip_vendor;
91
92 /* hardware features */
93 bool hw_learning_capable;
94 bool hw_tx_capable;
95
96 /* rx settings */
97 bool learning_enabled;
98 bool carrier_detect_enabled;
99
100 enum {
101 CMD_HEADER = 0,
102 SUBCMD,
103 CMD_DATA,
104 PARSE_IRDATA,
105 } parser_state;
106
107 u8 cmd, rem;
108
109 /* carrier period = 1 / frequency */
110 u32 carrier;
111};
112
113/* buffer packet constants, largely identical to mceusb.c */
114#define BUF_PULSE_BIT 0x80
115#define BUF_LEN_MASK 0x1f
116#define BUF_SAMPLE_MASK 0x7f
117
118#define BUF_COMMAND_HEADER 0x9f
119#define BUF_COMMAND_MASK 0xe0
120#define BUF_COMMAND_NULL 0x00
121#define BUF_HW_CMD_HEADER 0xff
122#define BUF_CMD_G_REVISION 0x0b
123#define BUF_CMD_S_CARRIER 0x06
124#define BUF_CMD_S_TIMEOUT 0x0c
125#define BUF_CMD_SIG_END 0x01
126#define BUF_CMD_S_TXMASK 0x08
127#define BUF_CMD_S_RXSENSOR 0x14
128#define BUF_RSP_PULSE_COUNT 0x15
129
130#define CIR_SAMPLE_PERIOD 50
131
132/*
133 * Configuration Register:
134 * Index Port
135 * Data Port
136 */
137#define CR_INDEX_PORT 0x2e
138#define CR_DATA_PORT 0x2f
139
140/* Possible alternate values, depending on how the chip is wired */
141#define CR_INDEX_PORT2 0x4e
142#define CR_DATA_PORT2 0x4f
143
144/*
145 * GCR_CONFIG_PORT_SEL bit 4 specifies which Index Port value is
146 * active. 1 = 0x4e, 0 = 0x2e
147 */
148#define PORT_SEL_PORT_4E_EN 0x10
149
150/* Extended Function Mode enable/disable magic values */
151#define CONFIG_REG_ENABLE 0x87
152#define CONFIG_REG_DISABLE 0xaa
153
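/*
 * Illustrative sketch of how the index/data pair and the magic values
 * above are typically used; the driver's own helpers in fintek-cir.c do
 * the equivalent. outb()/inb() are assumed to be available via
 * <linux/io.h> in the .c file.
 */
static inline u8 fintek_sketch_cr_read(struct fintek_dev *fintek, u8 reg)
{
	outb(reg, fintek->cr_ip);	/* select the config register ... */
	return inb(fintek->cr_dp);	/* ... and read it via the data port */
}

static inline void fintek_sketch_config_enable(struct fintek_dev *fintek)
{
	/* the enable key is typically written twice to the index port */
	outb(CONFIG_REG_ENABLE, fintek->cr_ip);
	outb(CONFIG_REG_ENABLE, fintek->cr_ip);
}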
154/* Chip IDs found in GCR_CHIP_ID_{HI,LO} */
155#define CHIP_ID_HIGH_F71809U 0x04
156#define CHIP_ID_LOW_F71809U 0x08
157
158/*
159 * Global control regs we need to care about:
160 * Global Control def.
161 * Register name addr val. */
162#define GCR_SOFTWARE_RESET 0x02 /* 0x00 */
163#define GCR_LOGICAL_DEV_NO 0x07 /* 0x00 */
164#define GCR_CHIP_ID_HI 0x20 /* 0x04 */
165#define GCR_CHIP_ID_LO 0x21 /* 0x08 */
166#define GCR_VENDOR_ID_HI 0x23 /* 0x19 */
167#define GCR_VENDOR_ID_LO 0x24 /* 0x34 */
168#define GCR_CONFIG_PORT_SEL 0x25 /* 0x01 */
169#define GCR_KBMOUSE_WAKEUP 0x27
170
171#define LOGICAL_DEV_DISABLE 0x00
172#define LOGICAL_DEV_ENABLE 0x01
173
174/* Logical device number of the CIR function */
175#define LOGICAL_DEV_CIR 0x05
176
177/* CIR Logical Device (LDN LOGICAL_DEV_CIR) config registers */
178#define CIR_CR_COMMAND_INDEX 0x04
179#define CIR_CR_IRCS 0x05 /* Before the host writes a command to IR,
180 it must set this bit to 1; once the
181 command write finishes, clear it to 0. */
182#define CIR_CR_COMMAND_DATA 0x06 /* Host reads or writes command data */
183#define CIR_CR_CLASS 0x07 /* 0xff = rx-only, 0x66 = rx + 2 tx,
184 0x33 = rx + 1 tx */
185#define CIR_CR_DEV_EN 0x30 /* bit0 = 1 enables CIR */
186#define CIR_CR_BASE_ADDR_HI 0x60 /* MSB of CIR IO base addr */
187#define CIR_CR_BASE_ADDR_LO 0x61 /* LSB of CIR IO base addr */
188#define CIR_CR_IRQ_SEL 0x70 /* bits3-0 store CIR IRQ */
189#define CIR_CR_PSOUT_STATUS 0xf1
190#define CIR_CR_WAKE_KEY3_ADDR 0xf8
191#define CIR_CR_WAKE_KEY3_CODE 0xf9
192#define CIR_CR_WAKE_KEY3_DC 0xfa
193#define CIR_CR_WAKE_CONTROL 0xfb
194#define CIR_CR_WAKE_KEY12_ADDR 0xfc
195#define CIR_CR_WAKE_KEY4_ADDR 0xfd
196#define CIR_CR_WAKE_KEY5_ADDR 0xfe
197
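/*
 * Illustrative sketch of the CIR_CR_IRCS handshake documented above, using
 * the driver's (value, register) ordered fintek_cr_write() helper:
 *
 *	fintek_cr_write(fintek, 1, CIR_CR_IRCS);		announce the write
 *	fintek_cr_write(fintek, cmd, CIR_CR_COMMAND_INDEX);
 *	fintek_cr_write(fintek, data, CIR_CR_COMMAND_DATA);
 *	fintek_cr_write(fintek, 0, CIR_CR_IRCS);		write finished
 */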
198#define CLASS_RX_ONLY 0xff
199#define CLASS_RX_2TX 0x66
200#define CLASS_RX_1TX 0x33
201
202/* CIR device registers */
203#define CIR_STATUS 0x00
204#define CIR_RX_DATA 0x01
205#define CIR_TX_CONTROL 0x02
206#define CIR_TX_DATA 0x03
207#define CIR_CONTROL 0x04
208
209/* Bits to enable CIR wake */
210#define LOGICAL_DEV_ACPI 0x01
211#define LDEV_ACPI_WAKE_EN_REG 0xe8
212#define ACPI_WAKE_EN_CIR_BIT 0x04
213
214#define LDEV_ACPI_PME_EN_REG 0xf0
215#define LDEV_ACPI_PME_CLR_REG 0xf1
216#define ACPI_PME_CIR_BIT 0x02
217
218#define LDEV_ACPI_STATE_REG 0xf4
219#define ACPI_STATE_CIR_BIT 0x20
220
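/*
 * Sketch of how the ACPI wake bits above are meant to be set from config
 * mode (a fintek_cr_read() helper is assumed here, mirroring the
 * fintek_cr_write() used elsewhere in this driver):
 *
 *	fintek_config_mode_enable(fintek);
 *	fintek_select_logical_dev(fintek, LOGICAL_DEV_ACPI);
 *	val = fintek_cr_read(fintek, LDEV_ACPI_WAKE_EN_REG);
 *	fintek_cr_write(fintek, val | ACPI_WAKE_EN_CIR_BIT, LDEV_ACPI_WAKE_EN_REG);
 *	val = fintek_cr_read(fintek, LDEV_ACPI_PME_EN_REG);
 *	fintek_cr_write(fintek, val | ACPI_PME_CIR_BIT, LDEV_ACPI_PME_EN_REG);
 *	fintek_config_mode_disable(fintek);
 */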
221/*
222 * CIR status register (0x00):
223 * 7 - CIR_IRQ_EN (1 = enable CIR IRQ, 0 = disable)
224 * 3 - TX_FINISH (1 when TX finished, write 1 to clear)
225 * 2 - TX_UNDERRUN (1 on TX underrun, write 1 to clear)
226 * 1 - RX_TIMEOUT (1 on RX timeout, write 1 to clear)
227 * 0 - RX_RECEIVE (1 on RX receive, write 1 to clear)
228 */
229#define CIR_STATUS_IRQ_EN 0x80
230#define CIR_STATUS_TX_FINISH 0x08
231#define CIR_STATUS_TX_UNDERRUN 0x04
232#define CIR_STATUS_RX_TIMEOUT 0x02
233#define CIR_STATUS_RX_RECEIVE 0x01
234#define CIR_STATUS_IRQ_MASK 0x0f
235
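/*
 * Example (sketch): the four event bits are write-1-to-clear, so an
 * interrupt handler typically reads CIR_STATUS and writes the observed
 * events straight back to acknowledge them; a fintek_cir_reg_read()
 * helper is assumed here, the write helper is the driver's own:
 *
 *	status = fintek_cir_reg_read(fintek, CIR_STATUS);
 *	fintek_cir_reg_write(fintek, status & CIR_STATUS_IRQ_MASK, CIR_STATUS);
 */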
236/*
237 * CIR TX control register (0x02):
238 * 7 - TX_START (1 to indicate TX start, auto-cleared when done)
239 * 6 - TX_END (1 to indicate TX data written to TX fifo)
240 */
241#define CIR_TX_CONTROL_TX_START 0x80
242#define CIR_TX_CONTROL_TX_END 0x40
243
diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c
index afae14fd152e..129d3f9a461d 100644
--- a/drivers/media/rc/keymaps/rc-lme2510.c
+++ b/drivers/media/rc/keymaps/rc-lme2510.c
@@ -14,81 +14,81 @@
14 14
15static struct rc_map_table lme2510_rc[] = { 15static struct rc_map_table lme2510_rc[] = {
16 /* Type 1 - 26 buttons */ 16 /* Type 1 - 26 buttons */
17 { 0xef12ba45, KEY_0 }, 17 { 0x10ed45, KEY_0 },
18 { 0xef12a05f, KEY_1 }, 18 { 0x10ed5f, KEY_1 },
19 { 0xef12af50, KEY_2 }, 19 { 0x10ed50, KEY_2 },
20 { 0xef12a25d, KEY_3 }, 20 { 0x10ed5d, KEY_3 },
21 { 0xef12be41, KEY_4 }, 21 { 0x10ed41, KEY_4 },
22 { 0xef12f50a, KEY_5 }, 22 { 0x10ed0a, KEY_5 },
23 { 0xef12bd42, KEY_6 }, 23 { 0x10ed42, KEY_6 },
24 { 0xef12b847, KEY_7 }, 24 { 0x10ed47, KEY_7 },
25 { 0xef12b649, KEY_8 }, 25 { 0x10ed49, KEY_8 },
26 { 0xef12fa05, KEY_9 }, 26 { 0x10ed05, KEY_9 },
27 { 0xef12bc43, KEY_POWER }, 27 { 0x10ed43, KEY_POWER },
28 { 0xef12b946, KEY_SUBTITLE }, 28 { 0x10ed46, KEY_SUBTITLE },
29 { 0xef12f906, KEY_PAUSE }, 29 { 0x10ed06, KEY_PAUSE },
30 { 0xef12fc03, KEY_MEDIA_REPEAT}, 30 { 0x10ed03, KEY_MEDIA_REPEAT},
31 { 0xef12fd02, KEY_PAUSE }, 31 { 0x10ed02, KEY_PAUSE },
32 { 0xef12a15e, KEY_VOLUMEUP }, 32 { 0x10ed5e, KEY_VOLUMEUP },
33 { 0xef12a35c, KEY_VOLUMEDOWN }, 33 { 0x10ed5c, KEY_VOLUMEDOWN },
34 { 0xef12f609, KEY_CHANNELUP }, 34 { 0x10ed09, KEY_CHANNELUP },
35 { 0xef12e51a, KEY_CHANNELDOWN }, 35 { 0x10ed1a, KEY_CHANNELDOWN },
36 { 0xef12e11e, KEY_PLAY }, 36 { 0x10ed1e, KEY_PLAY },
37 { 0xef12e41b, KEY_ZOOM }, 37 { 0x10ed1b, KEY_ZOOM },
38 { 0xef12a659, KEY_MUTE }, 38 { 0x10ed59, KEY_MUTE },
39 { 0xef12a55a, KEY_TV }, 39 { 0x10ed5a, KEY_TV },
40 { 0xef12e718, KEY_RECORD }, 40 { 0x10ed18, KEY_RECORD },
41 { 0xef12f807, KEY_EPG }, 41 { 0x10ed07, KEY_EPG },
42 { 0xef12fe01, KEY_STOP }, 42 { 0x10ed01, KEY_STOP },
43 /* Type 2 - 20 buttons */ 43 /* Type 2 - 20 buttons */
44 { 0xff40ea15, KEY_0 }, 44 { 0xbf15, KEY_0 },
45 { 0xff40f708, KEY_1 }, 45 { 0xbf08, KEY_1 },
46 { 0xff40f609, KEY_2 }, 46 { 0xbf09, KEY_2 },
47 { 0xff40f50a, KEY_3 }, 47 { 0xbf0a, KEY_3 },
48 { 0xff40f30c, KEY_4 }, 48 { 0xbf0c, KEY_4 },
49 { 0xff40f20d, KEY_5 }, 49 { 0xbf0d, KEY_5 },
50 { 0xff40f10e, KEY_6 }, 50 { 0xbf0e, KEY_6 },
51 { 0xff40ef10, KEY_7 }, 51 { 0xbf10, KEY_7 },
52 { 0xff40ee11, KEY_8 }, 52 { 0xbf11, KEY_8 },
53 { 0xff40ed12, KEY_9 }, 53 { 0xbf12, KEY_9 },
54 { 0xff40ff00, KEY_POWER }, 54 { 0xbf00, KEY_POWER },
55 { 0xff40fb04, KEY_MEDIA_REPEAT}, /* Recall */ 55 { 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
56 { 0xff40e51a, KEY_PAUSE }, /* Timeshift */ 56 { 0xbf1a, KEY_PAUSE }, /* Timeshift */
57 { 0xff40fd02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */ 57 { 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
58 { 0xff40f906, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/ 58 { 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
59 { 0xff40fe01, KEY_CHANNELUP }, 59 { 0xbf01, KEY_CHANNELUP },
60 { 0xff40fa05, KEY_CHANNELDOWN }, 60 { 0xbf05, KEY_CHANNELDOWN },
61 { 0xff40eb14, KEY_ZOOM }, 61 { 0xbf14, KEY_ZOOM },
62 { 0xff40e718, KEY_RECORD }, 62 { 0xbf18, KEY_RECORD },
63 { 0xff40e916, KEY_STOP }, 63 { 0xbf16, KEY_STOP },
64 /* Type 3 - 20 buttons */ 64 /* Type 3 - 20 buttons */
65 { 0xff00e31c, KEY_0 }, 65 { 0x1c, KEY_0 },
66 { 0xff00f807, KEY_1 }, 66 { 0x07, KEY_1 },
67 { 0xff00ea15, KEY_2 }, 67 { 0x15, KEY_2 },
68 { 0xff00f609, KEY_3 }, 68 { 0x09, KEY_3 },
69 { 0xff00e916, KEY_4 }, 69 { 0x16, KEY_4 },
70 { 0xff00e619, KEY_5 }, 70 { 0x19, KEY_5 },
71 { 0xff00f20d, KEY_6 }, 71 { 0x0d, KEY_6 },
72 { 0xff00f30c, KEY_7 }, 72 { 0x0c, KEY_7 },
73 { 0xff00e718, KEY_8 }, 73 { 0x18, KEY_8 },
74 { 0xff00a15e, KEY_9 }, 74 { 0x5e, KEY_9 },
75 { 0xff00ba45, KEY_POWER }, 75 { 0x45, KEY_POWER },
76 { 0xff00bb44, KEY_MEDIA_REPEAT}, /* Recall */ 76 { 0x44, KEY_MEDIA_REPEAT}, /* Recall */
77 { 0xff00b54a, KEY_PAUSE }, /* Timeshift */ 77 { 0x4a, KEY_PAUSE }, /* Timeshift */
78 { 0xff00b847, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */ 78 { 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
79 { 0xff00bc43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/ 79 { 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
80 { 0xff00b946, KEY_CHANNELUP }, 80 { 0x46, KEY_CHANNELUP },
81 { 0xff00bf40, KEY_CHANNELDOWN }, 81 { 0x40, KEY_CHANNELDOWN },
82 { 0xff00f708, KEY_ZOOM }, 82 { 0x08, KEY_ZOOM },
83 { 0xff00bd42, KEY_RECORD }, 83 { 0x42, KEY_RECORD },
84 { 0xff00a55a, KEY_STOP }, 84 { 0x5a, KEY_STOP },
85}; 85};
86 86
87static struct rc_map_list lme2510_map = { 87static struct rc_map_list lme2510_map = {
88 .map = { 88 .map = {
89 .scan = lme2510_rc, 89 .scan = lme2510_rc,
90 .size = ARRAY_SIZE(lme2510_rc), 90 .size = ARRAY_SIZE(lme2510_rc),
91 .rc_type = RC_TYPE_UNKNOWN, 91 .rc_type = RC_TYPE_NEC,
92 .name = RC_MAP_LME2510, 92 .name = RC_MAP_LME2510,
93 } 93 }
94}; 94};
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 3be180b3ba27..bb53de7fe408 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -687,7 +687,7 @@ config VIDEO_HEXIUM_GEMINI
687 687
688config VIDEO_TIMBERDALE 688config VIDEO_TIMBERDALE
689 tristate "Support for timberdale Video In/LogiWIN" 689 tristate "Support for timberdale Video In/LogiWIN"
690 depends on VIDEO_V4L2 && I2C 690 depends on VIDEO_V4L2 && I2C && DMADEVICES
691 select DMA_ENGINE 691 select DMA_ENGINE
692 select TIMB_DMA 692 select TIMB_DMA
693 select VIDEO_ADV7180 693 select VIDEO_ADV7180
@@ -757,6 +757,8 @@ config VIDEO_NOON010PC30
757 ---help--- 757 ---help---
758 This driver supports NOON010PC30 CIF camera from Siliconfile 758 This driver supports NOON010PC30 CIF camera from Siliconfile
759 759
760source "drivers/media/video/m5mols/Kconfig"
761
760config VIDEO_OMAP3 762config VIDEO_OMAP3
761 tristate "OMAP 3 Camera support (EXPERIMENTAL)" 763 tristate "OMAP 3 Camera support (EXPERIMENTAL)"
762 select OMAP_IOMMU 764 select OMAP_IOMMU
@@ -952,7 +954,7 @@ config VIDEO_SAMSUNG_S5P_FIMC
952 954
953config VIDEO_S5P_MIPI_CSIS 955config VIDEO_S5P_MIPI_CSIS
954 tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver" 956 tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver"
955 depends on VIDEO_V4L2 && PM_RUNTIME && VIDEO_V4L2_SUBDEV_API 957 depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P && VIDEO_V4L2_SUBDEV_API
956 ---help--- 958 ---help---
957 This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver. 959 This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver.
958 960
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 9519160c2e01..f0fecd6f6a33 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
69obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o 69obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
70obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o 70obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
71obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o 71obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
72obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
72 73
73obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o 74obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
74obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o 75obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 0073a8c55336..40eb6326e48a 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -438,7 +438,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
438 strcat(vc->card, " (676/"); 438 strcat(vc->card, " (676/");
439 break; 439 break;
440 default: 440 default:
441 strcat(vc->card, " (???/"); 441 strcat(vc->card, " (XXX/");
442 break; 442 break;
443 } 443 }
444 switch (cam->params.version.sensor_flags) { 444 switch (cam->params.version.sensor_flags) {
@@ -458,7 +458,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
458 strcat(vc->card, "500)"); 458 strcat(vc->card, "500)");
459 break; 459 break;
460 default: 460 default:
461 strcat(vc->card, "???)"); 461 strcat(vc->card, "XXX)");
462 break; 462 break;
463 } 463 }
464 464
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 280df43ca446..8d7813415760 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -1354,7 +1354,7 @@ void cx231xx_dump_SC_reg(struct cx231xx *dev)
1354{ 1354{
1355 u8 value[4] = { 0, 0, 0, 0 }; 1355 u8 value[4] = { 0, 0, 0, 0 };
1356 int status = 0; 1356 int status = 0;
1357 cx231xx_info("cx231xx_dump_SC_reg %s!\n", __TIME__); 1357 cx231xx_info("cx231xx_dump_SC_reg!\n");
1358 1358
1359 status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, 1359 status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
1360 value, 4); 1360 value, 4);
diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
index 66671a4092e4..26fc206f095e 100644
--- a/drivers/media/video/gspca/kinect.c
+++ b/drivers/media/video/gspca/kinect.c
@@ -34,7 +34,7 @@ MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
34MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver"); 34MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver");
35MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
36 36
37#ifdef DEBUG 37#ifdef GSPCA_DEBUG
38int gspca_debug = D_ERR | D_PROBE | D_CONF | D_STREAM | D_FRAM | D_PACK | 38int gspca_debug = D_ERR | D_PROBE | D_CONF | D_STREAM | D_FRAM | D_PACK |
39 D_USBI | D_USBO | D_V4L2; 39 D_USBI | D_USBO | D_V4L2;
40#endif 40#endif
diff --git a/drivers/media/video/m5mols/Kconfig b/drivers/media/video/m5mols/Kconfig
new file mode 100644
index 000000000000..302dc3d70193
--- /dev/null
+++ b/drivers/media/video/m5mols/Kconfig
@@ -0,0 +1,5 @@
1config VIDEO_M5MOLS
2 tristate "Fujitsu M-5MOLS 8MP sensor support"
3 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
4 ---help---
5 This driver supports Fujitsu M-5MOLS camera sensor with ISP
diff --git a/drivers/media/video/m5mols/Makefile b/drivers/media/video/m5mols/Makefile
new file mode 100644
index 000000000000..0a44e028edc7
--- /dev/null
+++ b/drivers/media/video/m5mols/Makefile
@@ -0,0 +1,3 @@
1m5mols-objs := m5mols_core.o m5mols_controls.o m5mols_capture.o
2
3obj-$(CONFIG_VIDEO_M5MOLS) += m5mols.o
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
new file mode 100644
index 000000000000..10b55c854487
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -0,0 +1,296 @@
1/*
2 * Header for M-5MOLS 8M Pixel camera sensor with ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef M5MOLS_H
17#define M5MOLS_H
18
19#include <media/v4l2-subdev.h>
20#include "m5mols_reg.h"
21
22extern int m5mols_debug;
23
24#define to_m5mols(__sd) container_of(__sd, struct m5mols_info, sd)
25
26#define to_sd(__ctrl) \
27 (&container_of(__ctrl->handler, struct m5mols_info, handle)->sd)
28
29enum m5mols_restype {
30 M5MOLS_RESTYPE_MONITOR,
31 M5MOLS_RESTYPE_CAPTURE,
32 M5MOLS_RESTYPE_MAX,
33};
34
35/**
36 * struct m5mols_resolution - structure for the resolution
37 * @type: resolution type according to the pixel code
38 * @width: width of the resolution
39 * @height: height of the resolution
40 * @reg: resolution preset register value
41 */
42struct m5mols_resolution {
43 u8 reg;
44 enum m5mols_restype type;
45 u16 width;
46 u16 height;
47};
48
49/**
50 * struct m5mols_exif - structure for the EXIF information of M-5MOLS
51 * @exposure_time: exposure time register value
52 * @shutter_speed: speed of the shutter register value
53 * @aperture: aperture register value
54 * @exposure_bias: also called EV bias
55 * @iso_speed: ISO register value
56 * @flash: status register value of the flash
57 * @sdr: status register value of the Subject Distance Range
58 * @qval: exact meaning not described in the documentation
59 */
60struct m5mols_exif {
61 u32 exposure_time;
62 u32 shutter_speed;
63 u32 aperture;
64 u32 brightness;
65 u32 exposure_bias;
66 u16 iso_speed;
67 u16 flash;
68 u16 sdr;
69 u16 qval;
70};
71
72/**
73 * struct m5mols_capture - Structure for the capture capability
74 * @exif: EXIF information
75 * @main: size in bytes of the main image
76 * @thumb: size in bytes of the thumb image, if it was accompanied
77 * @total: total size in bytes of the produced image
78 */
79struct m5mols_capture {
80 struct m5mols_exif exif;
81 u32 main;
82 u32 thumb;
83 u32 total;
84};
85
86/**
87 * struct m5mols_scenemode - structure for the scenemode capability
88 * @metering: metering light register value
89 * @ev_bias: EV bias register value
90 * @wb_mode: mode which means the WhiteBalance is Auto or Manual
91 * @wb_preset: whitebalance preset register value in the Manual mode
92 * @chroma_en: register value whether the Chroma capability is enabled or not
93 * @chroma_lvl: chroma's level register value
94 * @edge_en: register value whether the Edge capability is enabled or not
95 * @edge_lvl: edge's level register value
96 * @af_range: Auto Focus's range
97 * @fd_mode: Face Detection mode
98 * @mcc: Multi-axis Color Conversion which means emotion color
99 * @light: status of the Light
100 * @flash: status of the Flash
101 * @tone: Tone color which means Contrast
102 * @iso: ISO register value
103 * @capt_mode: Image Stabilization mode used while the camera is capturing
104 * @wdr: Wide Dynamic Range register value
105 *
106 * The values for each scene mode follow the recommendations in the documentation.
107 */
108struct m5mols_scenemode {
109 u32 metering;
110 u32 ev_bias;
111 u32 wb_mode;
112 u32 wb_preset;
113 u32 chroma_en;
114 u32 chroma_lvl;
115 u32 edge_en;
116 u32 edge_lvl;
117 u32 af_range;
118 u32 fd_mode;
119 u32 mcc;
120 u32 light;
121 u32 flash;
122 u32 tone;
123 u32 iso;
124 u32 capt_mode;
125 u32 wdr;
126};
127
128/**
129 * struct m5mols_version - firmware version information
130 * @customer: customer information
131 * @project: version of project information according to customer
132 * @fw: firmware revision
133 * @hw: hardware revision
134 * @param: version of the parameter
135 * @awb: Auto WhiteBalance algorithm version
136 * @str: information about manufacturer and packaging vendor
137 * @af: Auto Focus version
138 *
139 * The register offsets run from the customer version at 0x0 to
140 * the awb version at 0x09. The customer and project information occupy 1 byte
141 * each, while the fw, hw, param and awb fields require 2 bytes each. The str is a
142 * unique string associated with the firmware's version. It includes information
143 * about the manufacturer and the vendor of the sensor's packaging. The least
144 * significant 2 bytes of the string indicate the packaging manufacturer.
145 */
146#define VERSION_STRING_SIZE 22
147struct m5mols_version {
148 u8 customer;
149 u8 project;
150 u16 fw;
151 u16 hw;
152 u16 param;
153 u16 awb;
154 u8 str[VERSION_STRING_SIZE];
155 u8 af;
156};
157#define VERSION_SIZE sizeof(struct m5mols_version)
158
159/**
160 * struct m5mols_info - M-5MOLS driver data structure
161 * @pdata: platform data
162 * @sd: v4l-subdev instance
163 * @pad: media pad
164 * @ffmt: current fmt according to resolution type
165 * @res_type: current resolution type
166 * @code: current code
167 * @irq_waitq: waitqueue for the capture
168 * @work_irq: workqueue for the IRQ
169 * @flags: state variable for the interrupt handler
170 * @handle: control handler
171 * @autoexposure: Auto Exposure control
172 * @exposure: Exposure control
173 * @autowb: Auto White Balance control
174 * @colorfx: Color effect control
175 * @saturation: Saturation control
176 * @zoom: Zoom control
177 * @ver: information of the version
178 * @cap: the capture mode attributes
179 * @power: current sensor's power status
180 * @ctrl_sync: true means all controls of the sensor are initialized
181 * @int_capture: true means the capture interrupt is issued once
182 * @lock_ae: true means the Auto Exposure is locked
183 * @lock_awb: true means the Auto WhiteBalance is locked
184 * @resolution: register value for current resolution
185 * @interrupt: register value for current interrupt status
186 * @mode: register value for current operation mode
187 * @mode_save: register value for current operation mode for saving
188 * @set_power: optional power callback to the board code
189 */
190struct m5mols_info {
191 const struct m5mols_platform_data *pdata;
192 struct v4l2_subdev sd;
193 struct media_pad pad;
194 struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
195 int res_type;
196 enum v4l2_mbus_pixelcode code;
197 wait_queue_head_t irq_waitq;
198 struct work_struct work_irq;
199 unsigned long flags;
200
201 struct v4l2_ctrl_handler handle;
202 /* Autoexposure/exposure control cluster */
203 struct {
204 struct v4l2_ctrl *autoexposure;
205 struct v4l2_ctrl *exposure;
206 };
207 struct v4l2_ctrl *autowb;
208 struct v4l2_ctrl *colorfx;
209 struct v4l2_ctrl *saturation;
210 struct v4l2_ctrl *zoom;
211
212 struct m5mols_version ver;
213 struct m5mols_capture cap;
214 bool power;
215 bool ctrl_sync;
216 bool lock_ae;
217 bool lock_awb;
218 u8 resolution;
219 u32 interrupt;
220 u32 mode;
221 u32 mode_save;
222 int (*set_power)(struct device *dev, int on);
223};
224
225#define ST_CAPT_IRQ 0
226
227#define is_powered(__info) (__info->power)
228#define is_ctrl_synced(__info) (__info->ctrl_sync)
229#define is_available_af(__info) (__info->ver.af)
230#define is_code(__code, __type) (__code == m5mols_default_ffmt[__type].code)
231#define is_manufacturer(__info, __manufacturer) \
232 (__info->ver.str[0] == __manufacturer[0] && \
233 __info->ver.str[1] == __manufacturer[1])
234/*
235 * I2C operation of the M-5MOLS
236 *
237 * The I2C read operation of the M-5MOLS requires 2 messages. The first
238 * message sends the information about the command, command category, and total
239 * message size. The second message is used to retrieve the data specified in
240 * the first message.
241 *
242 * 1st message 2nd message
243 * +-------+---+----------+-----+-------+ +------+------+------+------+
244 * | size1 | R | category | cmd | size2 | | d[0] | d[1] | d[2] | d[3] |
245 * +-------+---+----------+-----+-------+ +------+------+------+------+
246 * - size1: message data size(5 in this case)
247 * - size2: desired buffer size of the 2nd message
248 * - d[0..3]: according to size2
249 *
250 * The I2C write operation needs just one message. The message includes
251 * category, command, total size, and desired data.
252 *
253 * 1st message
254 * +-------+---+----------+-----+------+------+------+------+
255 * | size1 | W | category | cmd | d[0] | d[1] | d[2] | d[3] |
256 * +-------+---+----------+-----+------+------+------+------+
257 * - d[0..3]: according to size1
258 */
259int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
260int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
261int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
262
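/*
 * Worked example of the layout above (illustrative): reading a two-byte
 * value, say the EXIF ISO field, sends a first message of
 * { 0x05, READ, category, command, 0x02 } and a 3-byte second read whose
 * payload bytes d[0..1] carry the value. The combined u32 reg_comb
 * argument below packs size, category and command together (the exact
 * encoding lives in m5mols_reg.h), so a caller simply does:
 *
 *	u32 iso;
 *	ret = m5mols_read(sd, EXIF_INFO_ISO, &iso);
 */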
263/*
264 * Mode operation of the M-5MOLS
265 *
266 * Changing the mode of the M-5MOLS requires the right execution order.
267 * There are three modes (PARAMETER, MONITOR, CAPTURE) which can be changed
268 * by the user. Various register categories are associated with each mode.
269 *
270 * +============================================================+
271 * | mode | category |
272 * +============================================================+
273 * | FLASH | FLASH(only after Stand-by or Power-on) |
274 * | SYSTEM | SYSTEM(only after sensor arm-booting) |
275 * | PARAMETER | PARAMETER |
276 * | MONITOR | MONITOR(preview), Auto Focus, Face Detection |
277 * | CAPTURE | Single CAPTURE, Preview(recording) |
278 * +============================================================+
279 *
280 * The available transitions between the modes are as follows:
281 * PARAMETER <---> MONITOR <---> CAPTURE
282 */
283int m5mols_mode(struct m5mols_info *info, u32 mode);
284
285int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg);
286int m5mols_sync_controls(struct m5mols_info *info);
287int m5mols_start_capture(struct m5mols_info *info);
288int m5mols_do_scenemode(struct m5mols_info *info, u32 mode);
289int m5mols_lock_3a(struct m5mols_info *info, bool lock);
290int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
291
292/* The firmware function */
293int m5mols_update_fw(struct v4l2_subdev *sd,
294 int (*set_power)(struct m5mols_info *, bool));
295
296#endif /* M5MOLS_H */
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
new file mode 100644
index 000000000000..d71a3903b60f
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -0,0 +1,191 @@
1/*
2 * The Capture code for Fujitsu M-5MOLS ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/i2c.h>
17#include <linux/slab.h>
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/delay.h>
21#include <linux/version.h>
22#include <linux/gpio.h>
23#include <linux/regulator/consumer.h>
24#include <linux/videodev2.h>
25#include <linux/version.h>
26#include <media/v4l2-ctrls.h>
27#include <media/v4l2-device.h>
28#include <media/v4l2-subdev.h>
29#include <media/m5mols.h>
30
31#include "m5mols.h"
32#include "m5mols_reg.h"
33
34static int m5mols_capture_error_handler(struct m5mols_info *info,
35 int timeout)
36{
37 int ret;
38
39 /* Disable all interrupts and clear relevant interrupt status bits */
40 ret = m5mols_write(&info->sd, SYSTEM_INT_ENABLE,
41 info->interrupt & ~(REG_INT_CAPTURE));
42 if (ret)
43 return ret;
44
45 if (timeout == 0)
46 return -ETIMEDOUT;
47
48 return 0;
49}
50/**
51 * m5mols_read_rational - I2C read of a rational number
52 *
53 * Read numerator and denominator from registers @addr_num and @addr_den
54 * respectively and return the division result in @val.
55 */
56static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
57 u32 addr_den, u32 *val)
58{
59 u32 num, den;
60
61 int ret = m5mols_read(sd, addr_num, &num);
62 if (!ret)
63 ret = m5mols_read(sd, addr_den, &den);
64 if (ret)
65 return ret;
66 *val = den == 0 ? 0 : num / den;
67 return ret;
68}
69
70/**
71 * m5mols_capture_info - Gather captured image information
72 *
73 * For now it gathers only EXIF information and file size.
74 */
75static int m5mols_capture_info(struct m5mols_info *info)
76{
77 struct m5mols_exif *exif = &info->cap.exif;
78 struct v4l2_subdev *sd = &info->sd;
79 int ret;
80
81 ret = m5mols_read_rational(sd, EXIF_INFO_EXPTIME_NU,
82 EXIF_INFO_EXPTIME_DE, &exif->exposure_time);
83 if (ret)
84 return ret;
85 ret = m5mols_read_rational(sd, EXIF_INFO_TV_NU, EXIF_INFO_TV_DE,
86 &exif->shutter_speed);
87 if (ret)
88 return ret;
89 ret = m5mols_read_rational(sd, EXIF_INFO_AV_NU, EXIF_INFO_AV_DE,
90 &exif->aperture);
91 if (ret)
92 return ret;
93 ret = m5mols_read_rational(sd, EXIF_INFO_BV_NU, EXIF_INFO_BV_DE,
94 &exif->brightness);
95 if (ret)
96 return ret;
97 ret = m5mols_read_rational(sd, EXIF_INFO_EBV_NU, EXIF_INFO_EBV_DE,
98 &exif->exposure_bias);
99 if (ret)
100 return ret;
101
102 ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed);
103 if (!ret)
104 ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash);
105 if (!ret)
106 ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr);
107 if (!ret)
108 ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval);
109 if (ret)
110 return ret;
111
112 if (!ret)
113 ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main);
114 if (!ret)
115 ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
116 if (!ret)
117 info->cap.total = info->cap.main + info->cap.thumb;
118
119 return ret;
120}
121
122int m5mols_start_capture(struct m5mols_info *info)
123{
124 struct v4l2_subdev *sd = &info->sd;
125 u32 resolution = info->resolution;
126 int timeout;
127 int ret;
128
129 /*
130 * Preparing capture. Setting control & interrupt before entering
131 * capture mode
132 *
133 * 1) change to MONITOR mode for operating control & interrupt
134 * 2) set controls (considering v4l2_control value & lock 3A)
135 * 3) set interrupt
136 * 4) change to CAPTURE mode
137 */
138 ret = m5mols_mode(info, REG_MONITOR);
139 if (!ret)
140 ret = m5mols_sync_controls(info);
141 if (!ret)
142 ret = m5mols_lock_3a(info, true);
143 if (!ret)
144 ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
145 if (!ret)
146 ret = m5mols_mode(info, REG_CAPTURE);
147 if (!ret) {
148 /* Wait for capture interrupt, after changing capture mode */
149 timeout = wait_event_interruptible_timeout(info->irq_waitq,
150 test_bit(ST_CAPT_IRQ, &info->flags),
151 msecs_to_jiffies(2000));
152 if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags))
153 ret = m5mols_capture_error_handler(info, timeout);
154 }
155 if (!ret)
156 ret = m5mols_lock_3a(info, false);
157 if (ret)
158 return ret;
159 /*
160 * Starting capture: set the capture frame count, resolution and
161 * format (available formats: JPEG, Bayer RAW, YUV).
162 *
163 * 1) select single or multi capture (up to 25), format, size
164 * 2) set interrupt
165 * 3) start capture(for main image, now)
166 * 4) get information
167 * 5) notify the file size to the v4l2 device (e.g. the s5p-fimc v4l2 device)
168 */
169 ret = m5mols_write(sd, CAPC_SEL_FRAME, 1);
170 if (!ret)
171 ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG);
172 if (!ret)
173 ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution);
174 if (!ret)
175 ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
176 if (!ret)
177 ret = m5mols_write(sd, CAPC_START, REG_CAP_START_MAIN);
178 if (!ret) {
179 /* Wait for the capture completion interrupt */
180 timeout = wait_event_interruptible_timeout(info->irq_waitq,
181 test_bit(ST_CAPT_IRQ, &info->flags),
182 msecs_to_jiffies(2000));
183 if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags)) {
184 ret = m5mols_capture_info(info);
185 if (!ret)
186 v4l2_subdev_notify(sd, 0, &info->cap.total);
187 }
188 }
189
190 return m5mols_capture_error_handler(info, timeout);
191}
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
new file mode 100644
index 000000000000..817c16fec368
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -0,0 +1,299 @@
1/*
2 * Controls for M-5MOLS 8M Pixel camera sensor with ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/i2c.h>
17#include <linux/delay.h>
18#include <linux/videodev2.h>
19#include <media/v4l2-ctrls.h>
20
21#include "m5mols.h"
22#include "m5mols_reg.h"
23
24static struct m5mols_scenemode m5mols_default_scenemode[] = {
25 [REG_SCENE_NORMAL] = {
26 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
27 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
28 REG_AF_NORMAL, REG_FD_OFF,
29 REG_MCC_NORMAL, REG_LIGHT_OFF, REG_FLASH_OFF,
30 5, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
31 },
32 [REG_SCENE_PORTRAIT] = {
33 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
34 REG_CHROMA_ON, 3, REG_EDGE_ON, 4,
35 REG_AF_NORMAL, BIT_FD_EN | BIT_FD_DRAW_FACE_FRAME,
36 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
37 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
38 },
39 [REG_SCENE_LANDSCAPE] = {
40 REG_AE_ALL, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
41 REG_CHROMA_ON, 4, REG_EDGE_ON, 6,
42 REG_AF_NORMAL, REG_FD_OFF,
43 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
44 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
45 },
46 [REG_SCENE_SPORTS] = {
47 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
48 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
49 REG_AF_NORMAL, REG_FD_OFF,
50 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
51 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
52 },
53 [REG_SCENE_PARTY_INDOOR] = {
54 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
55 REG_CHROMA_ON, 4, REG_EDGE_ON, 5,
56 REG_AF_NORMAL, REG_FD_OFF,
57 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
58 6, REG_ISO_200, REG_CAP_NONE, REG_WDR_OFF,
59 },
60 [REG_SCENE_BEACH_SNOW] = {
61 REG_AE_CENTER, REG_AE_INDEX_10_POS, REG_AWB_AUTO, 0,
62 REG_CHROMA_ON, 4, REG_EDGE_ON, 5,
63 REG_AF_NORMAL, REG_FD_OFF,
64 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
65 6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF,
66 },
67 [REG_SCENE_SUNSET] = {
68 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET,
69 REG_AWB_DAYLIGHT,
70 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
71 REG_AF_NORMAL, REG_FD_OFF,
72 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
73 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
74 },
75 [REG_SCENE_DAWN_DUSK] = {
76 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET,
77 REG_AWB_FLUORESCENT_1,
78 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
79 REG_AF_NORMAL, REG_FD_OFF,
80 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
81 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
82 },
83 [REG_SCENE_FALL] = {
84 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
85 REG_CHROMA_ON, 5, REG_EDGE_ON, 5,
86 REG_AF_NORMAL, REG_FD_OFF,
87 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
88 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
89 },
90 [REG_SCENE_NIGHT] = {
91 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
92 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
93 REG_AF_NORMAL, REG_FD_OFF,
94 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
95 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
96 },
97 [REG_SCENE_AGAINST_LIGHT] = {
98 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
99 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
100 REG_AF_NORMAL, REG_FD_OFF,
101 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
102 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
103 },
104 [REG_SCENE_FIRE] = {
105 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
106 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
107 REG_AF_NORMAL, REG_FD_OFF,
108 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
109 6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF,
110 },
111 [REG_SCENE_TEXT] = {
112 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
113 REG_CHROMA_ON, 3, REG_EDGE_ON, 7,
114 REG_AF_MACRO, REG_FD_OFF,
115 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
116 6, REG_ISO_AUTO, REG_CAP_ANTI_SHAKE, REG_WDR_ON,
117 },
118 [REG_SCENE_CANDLE] = {
119 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
120 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
121 REG_AF_NORMAL, REG_FD_OFF,
122 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
123 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
124 },
125};
126
127/**
128 * m5mols_do_scenemode() - Change current scenemode
129 * @mode: desired scene mode to apply
130 *
131 * WARNING: The execution order is important. Do not change the order.
132 */
133int m5mols_do_scenemode(struct m5mols_info *info, u32 mode)
134{
135 struct v4l2_subdev *sd = &info->sd;
136 struct m5mols_scenemode scenemode;
137 int ret;
138
139 if (mode > REG_SCENE_CANDLE)
140 return -EINVAL;
141 scenemode = m5mols_default_scenemode[mode];
142 ret = m5mols_lock_3a(info, false);
143 if (!ret)
144 ret = m5mols_write(sd, AE_EV_PRESET_MONITOR, mode);
145 if (!ret)
146 ret = m5mols_write(sd, AE_EV_PRESET_CAPTURE, mode);
147 if (!ret)
148 ret = m5mols_write(sd, AE_MODE, scenemode.metering);
149 if (!ret)
150 ret = m5mols_write(sd, AE_INDEX, scenemode.ev_bias);
151 if (!ret)
152 ret = m5mols_write(sd, AWB_MODE, scenemode.wb_mode);
153 if (!ret)
154 ret = m5mols_write(sd, AWB_MANUAL, scenemode.wb_preset);
155 if (!ret)
156 ret = m5mols_write(sd, MON_CHROMA_EN, scenemode.chroma_en);
157 if (!ret)
158 ret = m5mols_write(sd, MON_CHROMA_LVL, scenemode.chroma_lvl);
159 if (!ret)
160 ret = m5mols_write(sd, MON_EDGE_EN, scenemode.edge_en);
161 if (!ret)
162 ret = m5mols_write(sd, MON_EDGE_LVL, scenemode.edge_lvl);
163 if (!ret && is_available_af(info))
164 ret = m5mols_write(sd, AF_MODE, scenemode.af_range);
165 if (!ret && is_available_af(info))
166 ret = m5mols_write(sd, FD_CTL, scenemode.fd_mode);
167 if (!ret)
168 ret = m5mols_write(sd, MON_TONE_CTL, scenemode.tone);
169 if (!ret)
170 ret = m5mols_write(sd, AE_ISO, scenemode.iso);
171 if (!ret)
172 ret = m5mols_mode(info, REG_CAPTURE);
173 if (!ret)
174 ret = m5mols_write(sd, CAPP_WDR_EN, scenemode.wdr);
175 if (!ret)
176 ret = m5mols_write(sd, CAPP_MCC_MODE, scenemode.mcc);
177 if (!ret)
178 ret = m5mols_write(sd, CAPP_LIGHT_CTRL, scenemode.light);
179 if (!ret)
180 ret = m5mols_write(sd, CAPP_FLASH_CTRL, scenemode.flash);
181 if (!ret)
182 ret = m5mols_write(sd, CAPC_MODE, scenemode.capt_mode);
183 if (!ret)
184 ret = m5mols_mode(info, REG_MONITOR);
185
186 return ret;
187}
188
189static int m5mols_lock_ae(struct m5mols_info *info, bool lock)
190{
191 int ret = 0;
192
193 if (info->lock_ae != lock)
194 ret = m5mols_write(&info->sd, AE_LOCK,
195 lock ? REG_AE_LOCK : REG_AE_UNLOCK);
196 if (!ret)
197 info->lock_ae = lock;
198
199 return ret;
200}
201
202static int m5mols_lock_awb(struct m5mols_info *info, bool lock)
203{
204 int ret = 0;
205
206 if (info->lock_awb != lock)
207 ret = m5mols_write(&info->sd, AWB_LOCK,
208 lock ? REG_AWB_LOCK : REG_AWB_UNLOCK);
209 if (!ret)
210 info->lock_awb = lock;
211
212 return ret;
213}
214
215/* m5mols_lock_3a() - Lock 3A (Auto Exposure, Auto WhiteBalance, Auto Focus) */
216int m5mols_lock_3a(struct m5mols_info *info, bool lock)
217{
218 int ret;
219
220 ret = m5mols_lock_ae(info, lock);
221 if (!ret)
222 ret = m5mols_lock_awb(info, lock);
223 /* Don't need to handle unlocking AF */
224 if (!ret && is_available_af(info) && lock)
225 ret = m5mols_write(&info->sd, AF_EXECUTE, REG_AF_STOP);
226
227 return ret;
228}
229
230/* m5mols_set_ctrl() - The main control-set helper, called from the v4l2 control handler */
231int m5mols_set_ctrl(struct v4l2_ctrl *ctrl)
232{
233 struct v4l2_subdev *sd = to_sd(ctrl);
234 struct m5mols_info *info = to_m5mols(sd);
235 int ret;
236
237 switch (ctrl->id) {
238 case V4L2_CID_ZOOM_ABSOLUTE:
239 return m5mols_write(sd, MON_ZOOM, ctrl->val);
240
241 case V4L2_CID_EXPOSURE_AUTO:
242 ret = m5mols_lock_ae(info,
243 ctrl->val == V4L2_EXPOSURE_AUTO ? false : true);
244 if (!ret && ctrl->val == V4L2_EXPOSURE_AUTO)
245 ret = m5mols_write(sd, AE_MODE, REG_AE_ALL);
246 if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL) {
247 int val = info->exposure->val;
248 ret = m5mols_write(sd, AE_MODE, REG_AE_OFF);
249 if (!ret)
250 ret = m5mols_write(sd, AE_MAN_GAIN_MON, val);
251 if (!ret)
252 ret = m5mols_write(sd, AE_MAN_GAIN_CAP, val);
253 }
254 return ret;
255
256 case V4L2_CID_AUTO_WHITE_BALANCE:
257 ret = m5mols_lock_awb(info, ctrl->val ? false : true);
258 if (!ret)
259 ret = m5mols_write(sd, AWB_MODE, ctrl->val ?
260 REG_AWB_AUTO : REG_AWB_PRESET);
261 return ret;
262
263 case V4L2_CID_SATURATION:
264 ret = m5mols_write(sd, MON_CHROMA_LVL, ctrl->val);
265 if (!ret)
266 ret = m5mols_write(sd, MON_CHROMA_EN, REG_CHROMA_ON);
267 return ret;
268
269 case V4L2_CID_COLORFX:
270 /*
271 * This control uses two kinds of registers: normal & color.
272 * The normal effect belongs to category 1, while the color
273 * one belongs to category 2.
274 *
275 * The normal effect uses one register: CAT1_EFFECT.
276 * The color effect uses three registers:
277 * CAT2_COLOR_EFFECT, CAT2_CFIXR, CAT2_CFIXB.
278 */
279 ret = m5mols_write(sd, PARM_EFFECT,
280 ctrl->val == V4L2_COLORFX_NEGATIVE ? REG_EFFECT_NEGA :
281 ctrl->val == V4L2_COLORFX_EMBOSS ? REG_EFFECT_EMBOSS :
282 REG_EFFECT_OFF);
283 if (!ret)
284 ret = m5mols_write(sd, MON_EFFECT,
285 ctrl->val == V4L2_COLORFX_SEPIA ?
286 REG_COLOR_EFFECT_ON : REG_COLOR_EFFECT_OFF);
287 if (!ret)
288 ret = m5mols_write(sd, MON_CFIXR,
289 ctrl->val == V4L2_COLORFX_SEPIA ?
290 REG_CFIXR_SEPIA : 0);
291 if (!ret)
292 ret = m5mols_write(sd, MON_CFIXB,
293 ctrl->val == V4L2_COLORFX_SEPIA ?
294 REG_CFIXB_SEPIA : 0);
295 return ret;
296 }
297
298 return -EINVAL;
299}
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
new file mode 100644
index 000000000000..76eac26e84ae
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -0,0 +1,1004 @@
1/*
2 * Driver for M-5MOLS 8M Pixel camera sensor with ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/i2c.h>
17#include <linux/slab.h>
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/delay.h>
21#include <linux/version.h>
22#include <linux/gpio.h>
23#include <linux/regulator/consumer.h>
24#include <linux/videodev2.h>
25#include <media/v4l2-ctrls.h>
26#include <media/v4l2-device.h>
27#include <media/v4l2-subdev.h>
28#include <media/m5mols.h>
29
30#include "m5mols.h"
31#include "m5mols_reg.h"
32
33int m5mols_debug;
34module_param(m5mols_debug, int, 0644);
35
36#define MODULE_NAME "M5MOLS"
37#define M5MOLS_I2C_CHECK_RETRY 500
38
39/* The regulator consumer names for external voltage regulators */
40static struct regulator_bulk_data supplies[] = {
41 {
42 .supply = "core", /* ARM core power, 1.2V */
43 }, {
44 .supply = "dig_18", /* digital power 1, 1.8V */
45 }, {
46 .supply = "d_sensor", /* sensor power 1, 1.8V */
47 }, {
48 .supply = "dig_28", /* digital power 2, 2.8V */
49 }, {
50 .supply = "a_sensor", /* analog power */
51 }, {
52 .supply = "dig_12", /* digital power 3, 1.2V */
53 },
54};
55
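/*
 * Sketch of how the supplies above are meant to be consumed through the
 * regulator bulk API (the driver's actual power handling is expected to do
 * the equivalent):
 *
 *	ret = regulator_bulk_get(&client->dev, ARRAY_SIZE(supplies), supplies);
 *	if (!ret)
 *		ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
 *	...
 *	regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
 */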
56static struct v4l2_mbus_framefmt m5mols_default_ffmt[M5MOLS_RESTYPE_MAX] = {
57 [M5MOLS_RESTYPE_MONITOR] = {
58 .width = 1920,
59 .height = 1080,
60 .code = V4L2_MBUS_FMT_VYUY8_2X8,
61 .field = V4L2_FIELD_NONE,
62 .colorspace = V4L2_COLORSPACE_JPEG,
63 },
64 [M5MOLS_RESTYPE_CAPTURE] = {
65 .width = 1920,
66 .height = 1080,
67 .code = V4L2_MBUS_FMT_JPEG_1X8,
68 .field = V4L2_FIELD_NONE,
69 .colorspace = V4L2_COLORSPACE_JPEG,
70 },
71};
72#define SIZE_DEFAULT_FFMT ARRAY_SIZE(m5mols_default_ffmt)
73
74static const struct m5mols_resolution m5mols_reg_res[] = {
75 { 0x01, M5MOLS_RESTYPE_MONITOR, 128, 96 }, /* SUB-QCIF */
76 { 0x03, M5MOLS_RESTYPE_MONITOR, 160, 120 }, /* QQVGA */
77 { 0x05, M5MOLS_RESTYPE_MONITOR, 176, 144 }, /* QCIF */
78 { 0x06, M5MOLS_RESTYPE_MONITOR, 176, 176 },
79 { 0x08, M5MOLS_RESTYPE_MONITOR, 240, 320 }, /* QVGA */
80 { 0x09, M5MOLS_RESTYPE_MONITOR, 320, 240 }, /* QVGA */
81 { 0x0c, M5MOLS_RESTYPE_MONITOR, 240, 400 }, /* WQVGA */
82 { 0x0d, M5MOLS_RESTYPE_MONITOR, 400, 240 }, /* WQVGA */
83 { 0x0e, M5MOLS_RESTYPE_MONITOR, 352, 288 }, /* CIF */
84 { 0x13, M5MOLS_RESTYPE_MONITOR, 480, 360 },
85 { 0x15, M5MOLS_RESTYPE_MONITOR, 640, 360 }, /* qHD */
86 { 0x17, M5MOLS_RESTYPE_MONITOR, 640, 480 }, /* VGA */
87 { 0x18, M5MOLS_RESTYPE_MONITOR, 720, 480 },
88 { 0x1a, M5MOLS_RESTYPE_MONITOR, 800, 480 }, /* WVGA */
89 { 0x1f, M5MOLS_RESTYPE_MONITOR, 800, 600 }, /* SVGA */
90 { 0x21, M5MOLS_RESTYPE_MONITOR, 1280, 720 }, /* HD */
91 { 0x25, M5MOLS_RESTYPE_MONITOR, 1920, 1080 }, /* 1080p */
92 { 0x29, M5MOLS_RESTYPE_MONITOR, 3264, 2448 }, /* 2.63fps 8M */
93 { 0x39, M5MOLS_RESTYPE_MONITOR, 800, 602 }, /* AHS_MON debug */
94
95 { 0x02, M5MOLS_RESTYPE_CAPTURE, 320, 240 }, /* QVGA */
96 { 0x04, M5MOLS_RESTYPE_CAPTURE, 400, 240 }, /* WQVGA */
97 { 0x07, M5MOLS_RESTYPE_CAPTURE, 480, 360 },
98 { 0x08, M5MOLS_RESTYPE_CAPTURE, 640, 360 }, /* qHD */
99 { 0x09, M5MOLS_RESTYPE_CAPTURE, 640, 480 }, /* VGA */
100 { 0x0a, M5MOLS_RESTYPE_CAPTURE, 800, 480 }, /* WVGA */
101 { 0x10, M5MOLS_RESTYPE_CAPTURE, 1280, 720 }, /* HD */
102 { 0x14, M5MOLS_RESTYPE_CAPTURE, 1280, 960 }, /* 1M */
103 { 0x17, M5MOLS_RESTYPE_CAPTURE, 1600, 1200 }, /* 2M */
104 { 0x19, M5MOLS_RESTYPE_CAPTURE, 1920, 1080 }, /* Full-HD */
105 { 0x1a, M5MOLS_RESTYPE_CAPTURE, 2048, 1152 }, /* 3Mega */
106 { 0x1b, M5MOLS_RESTYPE_CAPTURE, 2048, 1536 },
107 { 0x1c, M5MOLS_RESTYPE_CAPTURE, 2560, 1440 }, /* 4Mega */
108 { 0x1d, M5MOLS_RESTYPE_CAPTURE, 2560, 1536 },
109 { 0x1f, M5MOLS_RESTYPE_CAPTURE, 2560, 1920 }, /* 5Mega */
110 { 0x21, M5MOLS_RESTYPE_CAPTURE, 3264, 1836 }, /* 6Mega */
111 { 0x22, M5MOLS_RESTYPE_CAPTURE, 3264, 1960 },
112 { 0x25, M5MOLS_RESTYPE_CAPTURE, 3264, 2448 }, /* 8Mega */
113};
114
115/**
116 * m5mols_swap_byte - byte array to integer conversion function
117 * @length: size in bytes of the I2C packet, as defined in the M-5MOLS datasheet
118 *
119 * Convert an I2C data byte array, performing any required byte
120 * reordering to ensure proper values for each data type, regardless
121 * of the architecture endianness.
122 */
123static u32 m5mols_swap_byte(u8 *data, u8 length)
124{
125 if (length == 1)
126 return *data;
127 else if (length == 2)
128 return be16_to_cpu(*((u16 *)data));
129 else
130 return be32_to_cpu(*((u32 *)data));
131}
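/*
 * For example, a two-byte payload received as { 0x01, 0x2c } is
 * big-endian on the wire and yields 0x012c here on both little- and
 * big-endian hosts (illustrative values only).
 */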
132
133/**
134 * m5mols_read - I2C read function
135 * @reg: combination of size, category and command for the I2C packet
136 * @val: read value
137 */
138int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
139{
140 struct i2c_client *client = v4l2_get_subdevdata(sd);
141 u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
142 u8 size = I2C_SIZE(reg);
143 u8 category = I2C_CATEGORY(reg);
144 u8 cmd = I2C_COMMAND(reg);
145 struct i2c_msg msg[2];
146 u8 wbuf[5];
147 int ret;
148
149 if (!client->adapter)
150 return -ENODEV;
151
152 if (size != 1 && size != 2 && size != 4) {
153 v4l2_err(sd, "Wrong data size\n");
154 return -EINVAL;
155 }
156
157 msg[0].addr = client->addr;
158 msg[0].flags = 0;
159 msg[0].len = 5;
160 msg[0].buf = wbuf;
161 wbuf[0] = 5;
162 wbuf[1] = M5MOLS_BYTE_READ;
163 wbuf[2] = category;
164 wbuf[3] = cmd;
165 wbuf[4] = size;
166
167 msg[1].addr = client->addr;
168 msg[1].flags = I2C_M_RD;
169 msg[1].len = size + 1;
170 msg[1].buf = rbuf;
171
172 /* minimum stabilization time */
173 usleep_range(200, 200);
174
175 ret = i2c_transfer(client->adapter, msg, 2);
176 if (ret < 0) {
177 v4l2_err(sd, "read failed: size:%d cat:%02x cmd:%02x. %d\n",
178 size, category, cmd, ret);
179 return ret;
180 }
181
182 *val = m5mols_swap_byte(&rbuf[1], size);
183
184 return 0;
185}
186
187/**
188 * m5mols_write - I2C command write function
189 * @reg: combination of size, category and command for the I2C packet
190 * @val: value to write
191 */
192int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
193{
194 struct i2c_client *client = v4l2_get_subdevdata(sd);
195 u8 wbuf[M5MOLS_I2C_MAX_SIZE + 4];
196 u8 category = I2C_CATEGORY(reg);
197 u8 cmd = I2C_COMMAND(reg);
198 u8 size = I2C_SIZE(reg);
199 u32 *buf = (u32 *)&wbuf[4];
200 struct i2c_msg msg[1];
201 int ret;
202
203 if (!client->adapter)
204 return -ENODEV;
205
206 if (size != 1 && size != 2 && size != 4) {
207 v4l2_err(sd, "Wrong data size\n");
208 return -EINVAL;
209 }
210
211 msg->addr = client->addr;
212 msg->flags = 0;
213 msg->len = (u16)size + 4;
214 msg->buf = wbuf;
215 wbuf[0] = size + 4;
216 wbuf[1] = M5MOLS_BYTE_WRITE;
217 wbuf[2] = category;
218 wbuf[3] = cmd;
219
220 *buf = m5mols_swap_byte((u8 *)&val, size);
221
222 usleep_range(200, 200);
223
224 ret = i2c_transfer(client->adapter, msg, 1);
225 if (ret < 0) {
226 v4l2_err(sd, "write failed: size:%d cat:%02x cmd:%02x. %d\n",
227 size, category, cmd, ret);
228 return ret;
229 }
230
231 return 0;
232}
233
234int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
235{
236 u32 busy, i;
237 int ret;
238
239 for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
240 ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy);
241 if (ret < 0)
242 return ret;
243 if ((busy & mask) == mask)
244 return 0;
245 }
246 return -EBUSY;
247}
248
249/**
250 * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts
251 *
252 * Before writing the desired interrupt value, the INT_FACTOR register should
253 * be read to clear pending interrupts.
254 */
255int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
256{
257 struct m5mols_info *info = to_m5mols(sd);
258 u32 mask = is_available_af(info) ? REG_INT_AF : 0;
259 u32 dummy;
260 int ret;
261
262 ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy);
263 if (!ret)
264 ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask);
265 return ret;
266}
267
268/**
269 * m5mols_reg_mode - Write the mode and check busy status
270 *
271 * Changing the M-5MOLS mode always involves a small delay, so the busy
272 * status must be checked to guarantee that the right mode has been entered.
273 */
274static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
275{
276 int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);
277
278 return ret ? ret : m5mols_busy(sd, CAT_SYSTEM, CAT0_SYSMODE, mode);
279}
280
281/**
282 * m5mols_mode - manage the M-5MOLS's mode
283 * @mode: the required operation mode
284 *
285 * The commands of M-5MOLS are grouped into specific modes. Each functionality
286 * can be guaranteed only when the sensor is operating in the mode to which
287 * the command belongs.
288 */
289int m5mols_mode(struct m5mols_info *info, u32 mode)
290{
291 struct v4l2_subdev *sd = &info->sd;
292 int ret = -EINVAL;
293 u32 reg;
294
295 if (mode < REG_PARAMETER && mode > REG_CAPTURE)
296 return ret;
297
298 ret = m5mols_read(sd, SYSTEM_SYSMODE, &reg);
299 if ((!ret && reg == mode) || ret)
300 return ret;
301
302 switch (reg) {
303 case REG_PARAMETER:
304 ret = m5mols_reg_mode(sd, REG_MONITOR);
305 if (!ret && mode == REG_MONITOR)
306 break;
307 if (!ret)
308 ret = m5mols_reg_mode(sd, REG_CAPTURE);
309 break;
310
311 case REG_MONITOR:
312 if (mode == REG_PARAMETER) {
313 ret = m5mols_reg_mode(sd, REG_PARAMETER);
314 break;
315 }
316
317 ret = m5mols_reg_mode(sd, REG_CAPTURE);
318 break;
319
320 case REG_CAPTURE:
321 ret = m5mols_reg_mode(sd, REG_MONITOR);
322 if (!ret && mode == REG_MONITOR)
323 break;
324 if (!ret)
325 ret = m5mols_reg_mode(sd, REG_PARAMETER);
326 break;
327
328 default:
329 v4l2_warn(sd, "Wrong mode: %d\n", mode);
330 }
331
332 if (!ret)
333 info->mode = mode;
334
335 return ret;
336}
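/*
 * Example call sequence (editor's sketch, not part of the driver): switching
 * from PARAMETER to CAPTURE has to pass through MONITOR, so m5mols_mode()
 * expands into two write-and-busy-wait steps:
 *
 *	m5mols_mode(info, REG_CAPTURE);
 *		m5mols_reg_mode(sd, REG_MONITOR);
 *		m5mols_reg_mode(sd, REG_CAPTURE);
 */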
337
338/**
339 * m5mols_get_version - retrieve full revisions information of M-5MOLS
340 *
341 * The version information includes revisions of hardware and firmware,
342 * AutoFocus algorithm version and the version string.
343 */
344static int m5mols_get_version(struct v4l2_subdev *sd)
345{
346 struct m5mols_info *info = to_m5mols(sd);
347 union {
348 struct m5mols_version ver;
349 u8 bytes[VERSION_SIZE];
350 } version;
351 u32 *value;
352 u8 cmd = CAT0_VER_CUSTOMER;
353 int ret;
354
355 do {
356 value = (u32 *)&version.bytes[cmd];
357 ret = m5mols_read(sd, SYSTEM_CMD(cmd), value);
358 if (ret)
359 return ret;
360 } while (cmd++ != CAT0_VER_AWB);
361
362 do {
363 value = (u32 *)&version.bytes[cmd];
364 ret = m5mols_read(sd, SYSTEM_VER_STRING, value);
365 if (ret)
366 return ret;
367 if (cmd >= VERSION_SIZE - 1)
368 return -EINVAL;
369 } while (version.bytes[cmd++]);
370
371 value = (u32 *)&version.bytes[cmd];
372 ret = m5mols_read(sd, AF_VERSION, value);
373 if (ret)
374 return ret;
375
376	/* Store the version information byte-swapped into host order */
377 info->ver = version.ver;
378 info->ver.fw = be16_to_cpu(info->ver.fw);
379 info->ver.hw = be16_to_cpu(info->ver.hw);
380 info->ver.param = be16_to_cpu(info->ver.param);
381 info->ver.awb = be16_to_cpu(info->ver.awb);
382
383 v4l2_info(sd, "Manufacturer\t[%s]\n",
384 is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
385 "Samsung Electro-Mechanics" :
386 is_manufacturer(info, REG_SAMSUNG_OPTICS) ?
387 "Samsung Fiber-Optics" :
388 is_manufacturer(info, REG_SAMSUNG_TECHWIN) ?
389 "Samsung Techwin" : "None");
390 v4l2_info(sd, "Customer/Project\t[0x%02x/0x%02x]\n",
391 info->ver.customer, info->ver.project);
392
393 if (!is_available_af(info))
394		v4l2_info(sd, "Auto Focus is not supported by this firmware\n");
395
396 return ret;
397}
398
399/**
400 * __find_restype - Lookup M-5MOLS resolution type according to pixel code
401 * @code: pixel code
402 */
403static enum m5mols_restype __find_restype(enum v4l2_mbus_pixelcode code)
404{
405 enum m5mols_restype type = M5MOLS_RESTYPE_MONITOR;
406
407 do {
408 if (code == m5mols_default_ffmt[type].code)
409 return type;
410	} while (++type != SIZE_DEFAULT_FFMT);
411
412 return 0;
413}
414
415/**
416 * __find_resolution - Lookup preset and type of M-5MOLS's resolution
417 * @mf: pixel format to find/negotiate the resolution preset for
418 * @type: M-5MOLS resolution type
419 * @resolution: M-5MOLS resolution preset register value
420 *
421 * Find nearest resolution matching resolution preset and adjust mf
422 * to supported values.
423 */
424static int __find_resolution(struct v4l2_subdev *sd,
425 struct v4l2_mbus_framefmt *mf,
426 enum m5mols_restype *type,
427 u32 *resolution)
428{
429 const struct m5mols_resolution *fsize = &m5mols_reg_res[0];
430 const struct m5mols_resolution *match = NULL;
431 enum m5mols_restype stype = __find_restype(mf->code);
432 int i = ARRAY_SIZE(m5mols_reg_res);
433 unsigned int min_err = ~0;
434
435 while (i--) {
436 int err;
437 if (stype == fsize->type) {
438 err = abs(fsize->width - mf->width)
439 + abs(fsize->height - mf->height);
440
441 if (err < min_err) {
442 min_err = err;
443 match = fsize;
444 }
445 }
446 fsize++;
447 }
448 if (match) {
449 mf->width = match->width;
450 mf->height = match->height;
451 *resolution = match->reg;
452 *type = stype;
453 return 0;
454 }
455
456 return -EINVAL;
457}
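/*
 * Worked example (editor's illustration with hypothetical numbers): for a
 * MONITOR-type request of 1000x580 the loop above scores every entry of the
 * same type with |width - 1000| + |height - 580|, keeps the entry with the
 * smallest error and rewrites mf->width/mf->height to that entry's exact
 * values, so the caller always ends up on a supported preset.
 */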
458
459static struct v4l2_mbus_framefmt *__find_format(struct m5mols_info *info,
460 struct v4l2_subdev_fh *fh,
461 enum v4l2_subdev_format_whence which,
462 enum m5mols_restype type)
463{
464 if (which == V4L2_SUBDEV_FORMAT_TRY)
465 return fh ? v4l2_subdev_get_try_format(fh, 0) : NULL;
466
467 return &info->ffmt[type];
468}
469
470static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
471 struct v4l2_subdev_format *fmt)
472{
473 struct m5mols_info *info = to_m5mols(sd);
474 struct v4l2_mbus_framefmt *format;
475
476 if (fmt->pad != 0)
477 return -EINVAL;
478
479 format = __find_format(info, fh, fmt->which, info->res_type);
480 if (!format)
481 return -EINVAL;
482
483 fmt->format = *format;
484 return 0;
485}
486
487static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
488 struct v4l2_subdev_format *fmt)
489{
490 struct m5mols_info *info = to_m5mols(sd);
491 struct v4l2_mbus_framefmt *format = &fmt->format;
492 struct v4l2_mbus_framefmt *sfmt;
493 enum m5mols_restype type;
494 u32 resolution = 0;
495 int ret;
496
497 if (fmt->pad != 0)
498 return -EINVAL;
499
500 ret = __find_resolution(sd, format, &type, &resolution);
501 if (ret < 0)
502 return ret;
503
504 sfmt = __find_format(info, fh, fmt->which, type);
505 if (!sfmt)
506 return 0;
507
508 *sfmt = m5mols_default_ffmt[type];
509 sfmt->width = format->width;
510 sfmt->height = format->height;
511
512 if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
513 info->resolution = resolution;
514 info->code = format->code;
515 info->res_type = type;
516 }
517
518 return 0;
519}
520
521static int m5mols_enum_mbus_code(struct v4l2_subdev *sd,
522 struct v4l2_subdev_fh *fh,
523 struct v4l2_subdev_mbus_code_enum *code)
524{
525 if (!code || code->index >= SIZE_DEFAULT_FFMT)
526 return -EINVAL;
527
528 code->code = m5mols_default_ffmt[code->index].code;
529
530 return 0;
531}
532
533static struct v4l2_subdev_pad_ops m5mols_pad_ops = {
534 .enum_mbus_code = m5mols_enum_mbus_code,
535 .get_fmt = m5mols_get_fmt,
536 .set_fmt = m5mols_set_fmt,
537};
538
539/**
540 * m5mols_sync_controls - Apply default scene mode and the current controls
541 *
542 * This is used only when streaming starts, to synchronize the v4l2_ctrl
543 * framework with the M-5MOLS controls. The default scene mode is written to
544 * the sensor first, then v4l2_ctrl_handler_setup() is called. Some control
545 * commands overlap with settings made by the scene mode; the scene mode write
546 * therefore has to precede the individual control writes.
547 */
548int m5mols_sync_controls(struct m5mols_info *info)
549{
550 int ret = -EINVAL;
551
552 if (!is_ctrl_synced(info)) {
553 ret = m5mols_do_scenemode(info, REG_SCENE_NORMAL);
554 if (ret)
555 return ret;
556
557 v4l2_ctrl_handler_setup(&info->handle);
558 info->ctrl_sync = true;
559 }
560
561 return ret;
562}
563
564/**
565 * m5mols_start_monitor - Start the monitor mode
566 *
567 * Before applying the controls setup the resolution and frame rate
568 * in PARAMETER mode, and then switch over to MONITOR mode.
569 */
570static int m5mols_start_monitor(struct m5mols_info *info)
571{
572 struct v4l2_subdev *sd = &info->sd;
573 int ret;
574
575 ret = m5mols_mode(info, REG_PARAMETER);
576 if (!ret)
577 ret = m5mols_write(sd, PARM_MON_SIZE, info->resolution);
578 if (!ret)
579 ret = m5mols_write(sd, PARM_MON_FPS, REG_FPS_30);
580 if (!ret)
581 ret = m5mols_mode(info, REG_MONITOR);
582 if (!ret)
583 ret = m5mols_sync_controls(info);
584
585 return ret;
586}
587
588static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
589{
590 struct m5mols_info *info = to_m5mols(sd);
591
592 if (enable) {
593 int ret = -EINVAL;
594
595 if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
596 ret = m5mols_start_monitor(info);
597 if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
598 ret = m5mols_start_capture(info);
599
600 return ret;
601 }
602
603 return m5mols_mode(info, REG_PARAMETER);
604}
605
606static const struct v4l2_subdev_video_ops m5mols_video_ops = {
607 .s_stream = m5mols_s_stream,
608};
609
610static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl)
611{
612 struct v4l2_subdev *sd = to_sd(ctrl);
613 struct m5mols_info *info = to_m5mols(sd);
614 int ret;
615
616 info->mode_save = info->mode;
617
618 ret = m5mols_mode(info, REG_PARAMETER);
619 if (!ret)
620 ret = m5mols_set_ctrl(ctrl);
621 if (!ret)
622 ret = m5mols_mode(info, info->mode_save);
623
624 return ret;
625}
626
627static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
628 .s_ctrl = m5mols_s_ctrl,
629};
630
631static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
632{
633 struct v4l2_subdev *sd = &info->sd;
634 struct i2c_client *client = v4l2_get_subdevdata(sd);
635 const struct m5mols_platform_data *pdata = info->pdata;
636 int ret;
637
638 if (enable) {
639 if (is_powered(info))
640 return 0;
641
642 if (info->set_power) {
643 ret = info->set_power(&client->dev, 1);
644 if (ret)
645 return ret;
646 }
647
648 ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
649 if (ret) {
650			if (info->set_power)
				info->set_power(&client->dev, 0);
651 return ret;
652 }
653
654 gpio_set_value(pdata->gpio_reset, !pdata->reset_polarity);
655 usleep_range(1000, 1000);
656 info->power = true;
657
658 return ret;
659 }
660
661 if (!is_powered(info))
662 return 0;
663
664 ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
665 if (ret)
666 return ret;
667
668 if (info->set_power)
669 info->set_power(&client->dev, 0);
670
671 gpio_set_value(pdata->gpio_reset, pdata->reset_polarity);
672 usleep_range(1000, 1000);
673 info->power = false;
674
675 return ret;
676}
677
678/* m5mols_update_fw - optional firmware update routine */
679int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd,
680 int (*set_power)(struct m5mols_info *, bool))
681{
682 return 0;
683}
684
685/**
686 * m5mols_sensor_armboot - Booting M-5MOLS internal ARM core.
687 *
688 * Booting the internal ARM core makes the M-5MOLS ready to accept commands
689 * over I2C. It is the first thing to do after power-up; afterwards the driver
690 * must wait at least the 520ms recommended by the M-5MOLS datasheet.
691 */
692static int m5mols_sensor_armboot(struct v4l2_subdev *sd)
693{
694 int ret;
695
696 ret = m5mols_write(sd, FLASH_CAM_START, REG_START_ARM_BOOT);
697 if (ret < 0)
698 return ret;
699
700 msleep(520);
701
702 ret = m5mols_get_version(sd);
703 if (!ret)
704 ret = m5mols_update_fw(sd, m5mols_sensor_power);
705 if (ret)
706 return ret;
707
708 v4l2_dbg(1, m5mols_debug, sd, "Success ARM Booting\n");
709
710 ret = m5mols_write(sd, PARM_INTERFACE, REG_INTERFACE_MIPI);
711 if (!ret)
712 ret = m5mols_enable_interrupt(sd, REG_INT_AF);
713
714 return ret;
715}
716
717static int m5mols_init_controls(struct m5mols_info *info)
718{
719 struct v4l2_subdev *sd = &info->sd;
720 u16 max_exposure;
721 u16 step_zoom;
722 int ret;
723
724	/* Determine control ranges and steps; they vary with the FW version */
725 ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure);
726 if (!ret)
727 step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
728 if (ret)
729 return ret;
730
731 v4l2_ctrl_handler_init(&info->handle, 6);
732 info->autowb = v4l2_ctrl_new_std(&info->handle,
733 &m5mols_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE,
734 0, 1, 1, 0);
735 info->saturation = v4l2_ctrl_new_std(&info->handle,
736 &m5mols_ctrl_ops, V4L2_CID_SATURATION,
737 1, 5, 1, 3);
738 info->zoom = v4l2_ctrl_new_std(&info->handle,
739 &m5mols_ctrl_ops, V4L2_CID_ZOOM_ABSOLUTE,
740 1, 70, step_zoom, 1);
741 info->exposure = v4l2_ctrl_new_std(&info->handle,
742 &m5mols_ctrl_ops, V4L2_CID_EXPOSURE,
743 0, max_exposure, 1, (int)max_exposure/2);
744 info->colorfx = v4l2_ctrl_new_std_menu(&info->handle,
745 &m5mols_ctrl_ops, V4L2_CID_COLORFX,
746 4, (1 << V4L2_COLORFX_BW), V4L2_COLORFX_NONE);
747 info->autoexposure = v4l2_ctrl_new_std_menu(&info->handle,
748 &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
749 1, 0, V4L2_EXPOSURE_MANUAL);
750
751 sd->ctrl_handler = &info->handle;
752 if (info->handle.error) {
753		v4l2_err(sd, "Failed to initialize controls: %d\n",
			 info->handle.error);
754 v4l2_ctrl_handler_free(&info->handle);
755 return info->handle.error;
756 }
757
758 v4l2_ctrl_cluster(2, &info->autoexposure);
759
760 return 0;
761}
762
763/**
764 * m5mols_s_power - Main sensor power control function
765 *
766 * To prevent damaging the lens when the sensor is powered off, the
767 * Soft-Landing algorithm is invoked where available. Whether Soft-Landing is
768 * available depends on the firmware provider.
769 */
770static int m5mols_s_power(struct v4l2_subdev *sd, int on)
771{
772 struct m5mols_info *info = to_m5mols(sd);
773 int ret;
774
775 if (on) {
776 ret = m5mols_sensor_power(info, true);
777 if (!ret)
778 ret = m5mols_sensor_armboot(sd);
779 if (!ret)
780 ret = m5mols_init_controls(info);
781 if (ret)
782 return ret;
783
784 info->ffmt[M5MOLS_RESTYPE_MONITOR] =
785 m5mols_default_ffmt[M5MOLS_RESTYPE_MONITOR];
786 info->ffmt[M5MOLS_RESTYPE_CAPTURE] =
787 m5mols_default_ffmt[M5MOLS_RESTYPE_CAPTURE];
788 return ret;
789 }
790
791 if (is_manufacturer(info, REG_SAMSUNG_TECHWIN)) {
792 ret = m5mols_mode(info, REG_MONITOR);
793 if (!ret)
794 ret = m5mols_write(sd, AF_EXECUTE, REG_AF_STOP);
795 if (!ret)
796 ret = m5mols_write(sd, AF_MODE, REG_AF_POWEROFF);
797 if (!ret)
798 ret = m5mols_busy(sd, CAT_SYSTEM, CAT0_STATUS,
799 REG_AF_IDLE);
800 if (!ret)
801 v4l2_info(sd, "Success soft-landing lens\n");
802 }
803
804 ret = m5mols_sensor_power(info, false);
805 if (!ret) {
806 v4l2_ctrl_handler_free(&info->handle);
807 info->ctrl_sync = false;
808 }
809
810 return ret;
811}
812
813static int m5mols_log_status(struct v4l2_subdev *sd)
814{
815 struct m5mols_info *info = to_m5mols(sd);
816
817 v4l2_ctrl_handler_log_status(&info->handle, sd->name);
818
819 return 0;
820}
821
822static const struct v4l2_subdev_core_ops m5mols_core_ops = {
823 .s_power = m5mols_s_power,
824 .g_ctrl = v4l2_subdev_g_ctrl,
825 .s_ctrl = v4l2_subdev_s_ctrl,
826 .queryctrl = v4l2_subdev_queryctrl,
827 .querymenu = v4l2_subdev_querymenu,
828 .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
829 .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
830 .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
831 .log_status = m5mols_log_status,
832};
833
834static const struct v4l2_subdev_ops m5mols_ops = {
835 .core = &m5mols_core_ops,
836 .pad = &m5mols_pad_ops,
837 .video = &m5mols_video_ops,
838};
839
840static void m5mols_irq_work(struct work_struct *work)
841{
842 struct m5mols_info *info =
843 container_of(work, struct m5mols_info, work_irq);
844 struct v4l2_subdev *sd = &info->sd;
845 u32 reg;
846 int ret;
847
848 if (!is_powered(info) ||
849 m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt))
850 return;
851
852 switch (info->interrupt & REG_INT_MASK) {
853 case REG_INT_AF:
854 if (!is_available_af(info))
855 break;
856 ret = m5mols_read(sd, AF_STATUS, &reg);
857 v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
858 reg == REG_AF_FAIL ? "Failed" :
859 reg == REG_AF_SUCCESS ? "Success" :
860 reg == REG_AF_IDLE ? "Idle" : "Busy");
861 break;
862 case REG_INT_CAPTURE:
863 if (!test_and_set_bit(ST_CAPT_IRQ, &info->flags))
864 wake_up_interruptible(&info->irq_waitq);
865
866 v4l2_dbg(2, m5mols_debug, sd, "CAPTURE\n");
867 break;
868 default:
869		v4l2_dbg(2, m5mols_debug, sd, "Undefined: %02x\n",
			 info->interrupt & REG_INT_MASK);
870 break;
871	}
872}
873
874static irqreturn_t m5mols_irq_handler(int irq, void *data)
875{
876 struct v4l2_subdev *sd = data;
877 struct m5mols_info *info = to_m5mols(sd);
878
879 schedule_work(&info->work_irq);
880
881 return IRQ_HANDLED;
882}
883
884static int __devinit m5mols_probe(struct i2c_client *client,
885 const struct i2c_device_id *id)
886{
887 const struct m5mols_platform_data *pdata = client->dev.platform_data;
888 struct m5mols_info *info;
889 struct v4l2_subdev *sd;
890 int ret;
891
892 if (pdata == NULL) {
893 dev_err(&client->dev, "No platform data\n");
894 return -EINVAL;
895 }
896
897 if (!gpio_is_valid(pdata->gpio_reset)) {
898 dev_err(&client->dev, "No valid RESET GPIO specified\n");
899 return -EINVAL;
900 }
901
902 if (!pdata->irq) {
903 dev_err(&client->dev, "Interrupt not assigned\n");
904 return -EINVAL;
905 }
906
907 info = kzalloc(sizeof(struct m5mols_info), GFP_KERNEL);
908 if (!info)
909 return -ENOMEM;
910
911 info->pdata = pdata;
912 info->set_power = pdata->set_power;
913
914 ret = gpio_request(pdata->gpio_reset, "M5MOLS_NRST");
915 if (ret) {
916 dev_err(&client->dev, "Failed to request gpio: %d\n", ret);
917 goto out_free;
918 }
919 gpio_direction_output(pdata->gpio_reset, pdata->reset_polarity);
920
921 ret = regulator_bulk_get(&client->dev, ARRAY_SIZE(supplies), supplies);
922 if (ret) {
923 dev_err(&client->dev, "Failed to get regulators: %d\n", ret);
924 goto out_gpio;
925 }
926
927 sd = &info->sd;
928 strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
929 v4l2_i2c_subdev_init(sd, client, &m5mols_ops);
930
931 info->pad.flags = MEDIA_PAD_FL_SOURCE;
932 ret = media_entity_init(&sd->entity, 1, &info->pad, 0);
933 if (ret < 0)
934 goto out_reg;
935 sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
936
937 init_waitqueue_head(&info->irq_waitq);
938 INIT_WORK(&info->work_irq, m5mols_irq_work);
939 ret = request_irq(pdata->irq, m5mols_irq_handler,
940 IRQF_TRIGGER_RISING, MODULE_NAME, sd);
941 if (ret) {
942 dev_err(&client->dev, "Interrupt request failed: %d\n", ret);
943 goto out_me;
944 }
945 info->res_type = M5MOLS_RESTYPE_MONITOR;
946 return 0;
947out_me:
948 media_entity_cleanup(&sd->entity);
949out_reg:
950 regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
951out_gpio:
952 gpio_free(pdata->gpio_reset);
953out_free:
954 kfree(info);
955 return ret;
956}
957
958static int __devexit m5mols_remove(struct i2c_client *client)
959{
960 struct v4l2_subdev *sd = i2c_get_clientdata(client);
961 struct m5mols_info *info = to_m5mols(sd);
962
963 v4l2_device_unregister_subdev(sd);
964 free_irq(info->pdata->irq, sd);
965
966 regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
967 gpio_free(info->pdata->gpio_reset);
968 media_entity_cleanup(&sd->entity);
969 kfree(info);
970 return 0;
971}
972
973static const struct i2c_device_id m5mols_id[] = {
974 { MODULE_NAME, 0 },
975 { },
976};
977MODULE_DEVICE_TABLE(i2c, m5mols_id);
978
979static struct i2c_driver m5mols_i2c_driver = {
980 .driver = {
981 .name = MODULE_NAME,
982 },
983 .probe = m5mols_probe,
984 .remove = __devexit_p(m5mols_remove),
985 .id_table = m5mols_id,
986};
987
988static int __init m5mols_mod_init(void)
989{
990 return i2c_add_driver(&m5mols_i2c_driver);
991}
992
993static void __exit m5mols_mod_exit(void)
994{
995 i2c_del_driver(&m5mols_i2c_driver);
996}
997
998module_init(m5mols_mod_init);
999module_exit(m5mols_mod_exit);
1000
1001MODULE_AUTHOR("HeungJun Kim <riverful.kim@samsung.com>");
1002MODULE_AUTHOR("Dongsoo Kim <dongsoo45.kim@samsung.com>");
1003MODULE_DESCRIPTION("Fujitsu M-5MOLS 8M Pixel camera driver");
1004MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
new file mode 100644
index 000000000000..b83e36fc6ac6
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -0,0 +1,399 @@
1/*
2 * Register map for M-5MOLS 8M Pixel camera sensor with ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef M5MOLS_REG_H
17#define M5MOLS_REG_H
18
19#define M5MOLS_I2C_MAX_SIZE 4
20#define M5MOLS_BYTE_READ 0x01
21#define M5MOLS_BYTE_WRITE 0x02
22
23#define I2C_CATEGORY(__cat) ((__cat >> 16) & 0xff)
24#define I2C_COMMAND(__comm) ((__comm >> 8) & 0xff)
25#define I2C_SIZE(__reg_s) ((__reg_s) & 0xff)
26#define I2C_REG(__cat, __cmd, __reg_s) ((__cat << 16) | (__cmd << 8) | __reg_s)
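/*
 * Worked example (editor's illustration): a 1-byte register at category 0x00,
 * command 0x0b packs into 0x00000b01, i.e.
 *
 *	I2C_REG(0x00, 0x0b, 1)   == 0x00000b01
 *	I2C_CATEGORY(0x00000b01) == 0x00
 *	I2C_COMMAND(0x00000b01)  == 0x0b
 *	I2C_SIZE(0x00000b01)     == 0x01
 */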
27
28/*
29 * Category section register
30 *
31 * The category means set including relevant command of M-5MOLS.
32 */
33#define CAT_SYSTEM 0x00
34#define CAT_PARAM 0x01
35#define CAT_MONITOR 0x02
36#define CAT_AE 0x03
37#define CAT_WB 0x06
38#define CAT_EXIF 0x07
39#define CAT_FD 0x09
40#define CAT_LENS 0x0a
41#define CAT_CAPT_PARM 0x0b
42#define CAT_CAPT_CTRL 0x0c
43#define CAT_FLASH 0x0f /* related to FW, revisions, booting */
44
45/*
46 * Category 0 - SYSTEM mode
47 *
48 * The SYSTEM mode covers the sensor-wide functionality of the M-5MOLS: version
49 * information, interrupts, the operating-mode setting and the sensor status.
50 * The M-5MOLS sensor with ISP varies by packaging and manufacturer, and even
51 * by customer and project code, and the details of some functions differ
52 * between these variants. The version information is therefore used to decide
53 * which methods the driver should use.
54 *
55 * There are many registers between the customer version address and the AWB
56 * one. For their exact layout see the definitions in the file m5mols.h.
57 */
58#define CAT0_VER_CUSTOMER 0x00 /* customer version */
59#define CAT0_VER_AWB 0x09 /* Auto WB version */
60#define CAT0_VER_STRING 0x0a /* string including M-5MOLS */
61#define CAT0_SYSMODE 0x0b /* SYSTEM mode register */
62#define CAT0_STATUS 0x0c /* SYSTEM mode status register */
63#define CAT0_INT_FACTOR 0x10 /* interrupt pending register */
64#define CAT0_INT_ENABLE 0x11 /* interrupt enable register */
65
66#define SYSTEM_SYSMODE I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
67#define REG_SYSINIT 0x00 /* SYSTEM mode */
68#define REG_PARAMETER 0x01 /* PARAMETER mode */
69#define REG_MONITOR 0x02 /* MONITOR mode */
70#define REG_CAPTURE 0x03 /* CAPTURE mode */
71
72#define SYSTEM_CMD(__cmd) I2C_REG(CAT_SYSTEM, __cmd, 1)
73#define SYSTEM_VER_STRING I2C_REG(CAT_SYSTEM, CAT0_VER_STRING, 1)
74#define REG_SAMSUNG_ELECTRO "SE" /* Samsung Electro-Mechanics */
75#define REG_SAMSUNG_OPTICS "OP" /* Samsung Fiber-Optics */
76#define REG_SAMSUNG_TECHWIN "TB" /* Samsung Techwin */
77
78#define SYSTEM_INT_FACTOR I2C_REG(CAT_SYSTEM, CAT0_INT_FACTOR, 1)
79#define SYSTEM_INT_ENABLE I2C_REG(CAT_SYSTEM, CAT0_INT_ENABLE, 1)
80#define REG_INT_MODE (1 << 0)
81#define REG_INT_AF (1 << 1)
82#define REG_INT_ZOOM (1 << 2)
83#define REG_INT_CAPTURE (1 << 3)
84#define REG_INT_FRAMESYNC (1 << 4)
85#define REG_INT_FD (1 << 5)
86#define REG_INT_LENS_INIT (1 << 6)
87#define REG_INT_SOUND (1 << 7)
88#define REG_INT_MASK 0x0f
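/*
 * Illustrative use (editor's sketch, not driver code): interrupt handling
 * reads SYSTEM_INT_FACTOR and tests the pending bits against this mask:
 *
 *	if (int_factor & REG_INT_CAPTURE)
 *		handle_capture_done();		(captured frame is ready)
 *	if (int_factor & REG_INT_AF)
 *		handle_af_done();		(then read AF_STATUS)
 *
 * handle_*() are hypothetical names used only for this illustration.
 */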
89
90/*
91 * category 1 - PARAMETER mode
92 *
93 * This category controls the camera features of the M-5MOLS: the preview
94 * (MONITOR) resolution, the frame rate, the interface between the sensor and
95 * the Application Processor, and the image effects.
96 */
97#define CAT1_DATA_INTERFACE 0x00 /* interface between sensor and AP */
98#define CAT1_MONITOR_SIZE 0x01 /* resolution at the MONITOR mode */
99#define CAT1_MONITOR_FPS 0x02 /* frame per second at this mode */
100#define CAT1_EFFECT 0x0b /* image effects */
101
102#define PARM_MON_SIZE I2C_REG(CAT_PARAM, CAT1_MONITOR_SIZE, 1)
103
104#define PARM_MON_FPS I2C_REG(CAT_PARAM, CAT1_MONITOR_FPS, 1)
105#define REG_FPS_30 0x02
106
107#define PARM_INTERFACE I2C_REG(CAT_PARAM, CAT1_DATA_INTERFACE, 1)
108#define REG_INTERFACE_MIPI 0x02
109
110#define PARM_EFFECT I2C_REG(CAT_PARAM, CAT1_EFFECT, 1)
111#define REG_EFFECT_OFF 0x00
112#define REG_EFFECT_NEGA 0x01
113#define REG_EFFECT_EMBOSS 0x06
114#define REG_EFFECT_OUTLINE 0x07
115#define REG_EFFECT_WATERCOLOR 0x08
116
117/*
118 * Category 2 - MONITOR mode
119 *
120 * The MONITOR mode corresponds to the preview mode mentioned above. The
121 * M-5MOLS also has a mode literally named "Preview", but that one is only used
122 * in a specific video-recording case. MONITOR mode supports only the YUYV
123 * format; the JPEG and RAW formats are handled by the CAPTURE mode. MONITOR
124 * mode additionally offers options such as zoom, color effects (different from
125 * the effects in PARAMETER mode) and an anti hand-shaking algorithm.
126 */
127#define CAT2_ZOOM 0x01 /* set the zoom position & execute */
128#define CAT2_ZOOM_STEP 0x03 /* set the zoom step */
129#define CAT2_CFIXB 0x09 /* CB value for color effect */
130#define CAT2_CFIXR 0x0a /* CR value for color effect */
131#define CAT2_COLOR_EFFECT 0x0b /* set on/off of color effect */
132#define CAT2_CHROMA_LVL 0x0f /* set chroma level */
133#define CAT2_CHROMA_EN 0x10 /* set on/off of chroma */
134#define CAT2_EDGE_LVL 0x11 /* set sharpness level */
135#define CAT2_EDGE_EN 0x12 /* set on/off sharpness */
136#define CAT2_TONE_CTL 0x25 /* set tone color(contrast) */
137
138#define MON_ZOOM I2C_REG(CAT_MONITOR, CAT2_ZOOM, 1)
139
140#define MON_CFIXR I2C_REG(CAT_MONITOR, CAT2_CFIXR, 1)
141#define MON_CFIXB I2C_REG(CAT_MONITOR, CAT2_CFIXB, 1)
142#define REG_CFIXB_SEPIA 0xd8
143#define REG_CFIXR_SEPIA 0x18
144
145#define MON_EFFECT I2C_REG(CAT_MONITOR, CAT2_COLOR_EFFECT, 1)
146#define REG_COLOR_EFFECT_OFF 0x00
147#define REG_COLOR_EFFECT_ON 0x01
148
149#define MON_CHROMA_EN I2C_REG(CAT_MONITOR, CAT2_CHROMA_EN, 1)
150#define MON_CHROMA_LVL I2C_REG(CAT_MONITOR, CAT2_CHROMA_LVL, 1)
151#define REG_CHROMA_OFF 0x00
152#define REG_CHROMA_ON 0x01
153
154#define MON_EDGE_EN I2C_REG(CAT_MONITOR, CAT2_EDGE_EN, 1)
155#define MON_EDGE_LVL I2C_REG(CAT_MONITOR, CAT2_EDGE_LVL, 1)
156#define REG_EDGE_OFF 0x00
157#define REG_EDGE_ON 0x01
158
159#define MON_TONE_CTL I2C_REG(CAT_MONITOR, CAT2_TONE_CTL, 1)
160
161/*
162 * Category 3 - Auto Exposure
163 *
164 * The exposure capability of the M-5MOLS is comparable to that of a digital
165 * camera. This category supports AE locking, the various AE modes (exposure
166 * ranges), ISO, flickering, EV bias, shutter and metering. The maximum and
167 * minimum exposure gain values depend on the M-5MOLS firmware, so this
168 * category also provides a way to read the max/min values. The MONITOR and
169 * CAPTURE modes each have their own gain, shutter and maximum exposure values.
170 */
171#define CAT3_AE_LOCK 0x00 /* locking Auto exposure */
172#define CAT3_AE_MODE 0x01 /* set AE mode, mode means range */
173#define CAT3_ISO 0x05 /* set ISO */
174#define CAT3_EV_PRESET_MONITOR 0x0a /* EV(scenemode) preset for MONITOR */
175#define CAT3_EV_PRESET_CAPTURE 0x0b /* EV(scenemode) preset for CAPTURE */
176#define CAT3_MANUAL_GAIN_MON 0x12 /* metering value for the MONITOR */
177#define CAT3_MAX_GAIN_MON 0x1a /* max gain value for the MONITOR */
178#define CAT3_MANUAL_GAIN_CAP 0x26 /* metering value for the CAPTURE */
179#define CAT3_AE_INDEX 0x38 /* AE index */
180
181#define AE_LOCK I2C_REG(CAT_AE, CAT3_AE_LOCK, 1)
182#define REG_AE_UNLOCK 0x00
183#define REG_AE_LOCK 0x01
184
185#define AE_MODE I2C_REG(CAT_AE, CAT3_AE_MODE, 1)
186#define REG_AE_OFF 0x00 /* AE off */
187#define REG_AE_ALL 0x01 /* calc AE in all block integral */
188#define REG_AE_CENTER 0x03 /* calc AE in center weighted */
189#define REG_AE_SPOT 0x06 /* calc AE in specific spot */
190
191#define AE_ISO I2C_REG(CAT_AE, CAT3_ISO, 1)
192#define REG_ISO_AUTO 0x00
193#define REG_ISO_50 0x01
194#define REG_ISO_100 0x02
195#define REG_ISO_200 0x03
196#define REG_ISO_400 0x04
197#define REG_ISO_800 0x05
198
199#define AE_EV_PRESET_MONITOR I2C_REG(CAT_AE, CAT3_EV_PRESET_MONITOR, 1)
200#define AE_EV_PRESET_CAPTURE I2C_REG(CAT_AE, CAT3_EV_PRESET_CAPTURE, 1)
201#define REG_SCENE_NORMAL 0x00
202#define REG_SCENE_PORTRAIT 0x01
203#define REG_SCENE_LANDSCAPE 0x02
204#define REG_SCENE_SPORTS 0x03
205#define REG_SCENE_PARTY_INDOOR 0x04
206#define REG_SCENE_BEACH_SNOW 0x05
207#define REG_SCENE_SUNSET 0x06
208#define REG_SCENE_DAWN_DUSK 0x07
209#define REG_SCENE_FALL 0x08
210#define REG_SCENE_NIGHT 0x09
211#define REG_SCENE_AGAINST_LIGHT 0x0a
212#define REG_SCENE_FIRE 0x0b
213#define REG_SCENE_TEXT 0x0c
214#define REG_SCENE_CANDLE 0x0d
215
216#define AE_MAN_GAIN_MON I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_MON, 2)
217#define AE_MAX_GAIN_MON I2C_REG(CAT_AE, CAT3_MAX_GAIN_MON, 2)
218#define AE_MAN_GAIN_CAP I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_CAP, 2)
219
220#define AE_INDEX I2C_REG(CAT_AE, CAT3_AE_INDEX, 1)
221#define REG_AE_INDEX_20_NEG 0x00
222#define REG_AE_INDEX_15_NEG 0x01
223#define REG_AE_INDEX_10_NEG 0x02
224#define REG_AE_INDEX_05_NEG 0x03
225#define REG_AE_INDEX_00 0x04
226#define REG_AE_INDEX_05_POS 0x05
227#define REG_AE_INDEX_10_POS 0x06
228#define REG_AE_INDEX_15_POS 0x07
229#define REG_AE_INDEX_20_POS 0x08
230
231/*
232 * Category 6 - White Balance
233 *
234 * This category provides AWB locking, mode, preset, speed, gain bias, etc.
235 */
236#define CAT6_AWB_LOCK 0x00 /* locking Auto Whitebalance */
237#define CAT6_AWB_MODE 0x02 /* set Auto or Manual */
238#define CAT6_AWB_MANUAL 0x03 /* set Manual(preset) value */
239
240#define AWB_LOCK I2C_REG(CAT_WB, CAT6_AWB_LOCK, 1)
241#define REG_AWB_UNLOCK 0x00
242#define REG_AWB_LOCK 0x01
243
244#define AWB_MODE I2C_REG(CAT_WB, CAT6_AWB_MODE, 1)
245#define REG_AWB_AUTO 0x01 /* automatic white balance */
246#define REG_AWB_PRESET 0x02 /* AWB preset */
247
248#define AWB_MANUAL I2C_REG(CAT_WB, CAT6_AWB_MANUAL, 1)
249#define REG_AWB_INCANDESCENT 0x01
250#define REG_AWB_FLUORESCENT_1 0x02
251#define REG_AWB_FLUORESCENT_2 0x03
252#define REG_AWB_DAYLIGHT 0x04
253#define REG_AWB_CLOUDY 0x05
254#define REG_AWB_SHADE 0x06
255#define REG_AWB_HORIZON 0x07
256#define REG_AWB_LEDLIGHT 0x09
257
258/*
259 * Category 7 - EXIF information
260 */
261#define CAT7_INFO_EXPTIME_NU 0x00
262#define CAT7_INFO_EXPTIME_DE 0x04
263#define CAT7_INFO_TV_NU 0x08
264#define CAT7_INFO_TV_DE 0x0c
265#define CAT7_INFO_AV_NU 0x10
266#define CAT7_INFO_AV_DE 0x14
267#define CAT7_INFO_BV_NU 0x18
268#define CAT7_INFO_BV_DE 0x1c
269#define CAT7_INFO_EBV_NU 0x20
270#define CAT7_INFO_EBV_DE 0x24
271#define CAT7_INFO_ISO 0x28
272#define CAT7_INFO_FLASH 0x2a
273#define CAT7_INFO_SDR 0x2c
274#define CAT7_INFO_QVAL 0x2e
275
276#define EXIF_INFO_EXPTIME_NU I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_NU, 4)
277#define EXIF_INFO_EXPTIME_DE I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_DE, 4)
278#define EXIF_INFO_TV_NU I2C_REG(CAT_EXIF, CAT7_INFO_TV_NU, 4)
279#define EXIF_INFO_TV_DE I2C_REG(CAT_EXIF, CAT7_INFO_TV_DE, 4)
280#define EXIF_INFO_AV_NU I2C_REG(CAT_EXIF, CAT7_INFO_AV_NU, 4)
281#define EXIF_INFO_AV_DE I2C_REG(CAT_EXIF, CAT7_INFO_AV_DE, 4)
282#define EXIF_INFO_BV_NU I2C_REG(CAT_EXIF, CAT7_INFO_BV_NU, 4)
283#define EXIF_INFO_BV_DE I2C_REG(CAT_EXIF, CAT7_INFO_BV_DE, 4)
284#define EXIF_INFO_EBV_NU I2C_REG(CAT_EXIF, CAT7_INFO_EBV_NU, 4)
285#define EXIF_INFO_EBV_DE I2C_REG(CAT_EXIF, CAT7_INFO_EBV_DE, 4)
286#define EXIF_INFO_ISO I2C_REG(CAT_EXIF, CAT7_INFO_ISO, 2)
287#define EXIF_INFO_FLASH I2C_REG(CAT_EXIF, CAT7_INFO_FLASH, 2)
288#define EXIF_INFO_SDR I2C_REG(CAT_EXIF, CAT7_INFO_SDR, 2)
289#define EXIF_INFO_QVAL I2C_REG(CAT_EXIF, CAT7_INFO_QVAL, 2)
290
291/*
292 * Category 9 - Face Detection
293 */
294#define CAT9_FD_CTL 0x00
295
296#define FD_CTL I2C_REG(CAT_FD, CAT9_FD_CTL, 1)
297#define BIT_FD_EN 0
298#define BIT_FD_DRAW_FACE_FRAME 4
299#define BIT_FD_DRAW_SMILE_LVL 6
300#define REG_FD(shift) (1 << (shift))
301#define REG_FD_OFF 0x0
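/*
 * Illustrative example (editor's sketch): assuming FD_CTL takes a plain
 * bitmask of the positions above, enabling face detection together with the
 * face-frame overlay would be written as
 *
 *	m5mols_write(sd, FD_CTL,
 *		     REG_FD(BIT_FD_EN) | REG_FD(BIT_FD_DRAW_FACE_FRAME));
 */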
302
303/*
304 * Category A - Lens Parameter
305 */
306#define CATA_AF_MODE 0x01
307#define CATA_AF_EXECUTE 0x02
308#define CATA_AF_STATUS 0x03
309#define CATA_AF_VERSION 0x0a
310
311#define AF_MODE I2C_REG(CAT_LENS, CATA_AF_MODE, 1)
312#define REG_AF_NORMAL 0x00 /* Normal AF, one time */
313#define REG_AF_MACRO 0x01 /* Macro AF, one time */
314#define REG_AF_POWEROFF 0x07
315
316#define AF_EXECUTE I2C_REG(CAT_LENS, CATA_AF_EXECUTE, 1)
317#define REG_AF_STOP 0x00
318#define REG_AF_EXE_AUTO 0x01
319#define REG_AF_EXE_CAF 0x02
320
321#define AF_STATUS I2C_REG(CAT_LENS, CATA_AF_STATUS, 1)
322#define REG_AF_FAIL 0x00
323#define REG_AF_SUCCESS 0x02
324#define REG_AF_IDLE 0x04
325#define REG_AF_BUSY 0x05
326
327#define AF_VERSION I2C_REG(CAT_LENS, CATA_AF_VERSION, 1)
328
329/*
330 * Category B - CAPTURE Parameter
331 */
332#define CATB_YUVOUT_MAIN 0x00
333#define CATB_MAIN_IMAGE_SIZE 0x01
334#define CATB_MCC_MODE 0x1d
335#define CATB_WDR_EN 0x2c
336#define CATB_LIGHT_CTRL 0x40
337#define CATB_FLASH_CTRL 0x41
338
339#define CAPP_YUVOUT_MAIN I2C_REG(CAT_CAPT_PARM, CATB_YUVOUT_MAIN, 1)
340#define REG_YUV422 0x00
341#define REG_BAYER10 0x05
342#define REG_BAYER8 0x06
343#define REG_JPEG 0x10
344
345#define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, CATB_MAIN_IMAGE_SIZE, 1)
346
347#define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, CATB_MCC_MODE, 1)
348#define REG_MCC_OFF 0x00
349#define REG_MCC_NORMAL 0x01
350
351#define CAPP_WDR_EN I2C_REG(CAT_CAPT_PARM, CATB_WDR_EN, 1)
352#define REG_WDR_OFF 0x00
353#define REG_WDR_ON 0x01
354#define REG_WDR_AUTO 0x02
355
356#define CAPP_LIGHT_CTRL I2C_REG(CAT_CAPT_PARM, CATB_LIGHT_CTRL, 1)
357#define REG_LIGHT_OFF 0x00
358#define REG_LIGHT_ON 0x01
359#define REG_LIGHT_AUTO 0x02
360
361#define CAPP_FLASH_CTRL I2C_REG(CAT_CAPT_PARM, CATB_FLASH_CTRL, 1)
362#define REG_FLASH_OFF 0x00
363#define REG_FLASH_ON 0x01
364#define REG_FLASH_AUTO 0x02
365
366/*
367 * Category C - CAPTURE Control
368 */
369#define CATC_CAP_MODE 0x00
370#define CATC_CAP_SEL_FRAME 0x06 /* selects single- or multi-frame capture */
371#define CATC_CAP_START 0x09
372#define CATC_CAP_IMAGE_SIZE 0x0d
373#define CATC_CAP_THUMB_SIZE 0x11
374
375#define CAPC_MODE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_MODE, 1)
376#define REG_CAP_NONE 0x00
377#define REG_CAP_ANTI_SHAKE 0x02
378
379#define CAPC_SEL_FRAME I2C_REG(CAT_CAPT_CTRL, CATC_CAP_SEL_FRAME, 1)
380
381#define CAPC_START I2C_REG(CAT_CAPT_CTRL, CATC_CAP_START, 1)
382#define REG_CAP_START_MAIN 0x01
383#define REG_CAP_START_THUMB 0x03
384
385#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1)
386#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1)
387
388/*
389 * Category F - Flash
390 *
391 * This category provides access to the internal flash and to system startup.
392 */
393#define CATF_CAM_START 0x12 /* It starts internal ARM core booting
394 * after power-up */
395
396#define FLASH_CAM_START I2C_REG(CAT_FLASH, CATF_CAM_START, 1)
397#define REG_START_ARM_BOOT 0x01
398
399#endif /* M5MOLS_REG_H */
diff --git a/drivers/media/video/uvc/Makefile b/drivers/media/video/uvc/Makefile
index 968c1994eda0..2071ca8a2f03 100644
--- a/drivers/media/video/uvc/Makefile
+++ b/drivers/media/video/uvc/Makefile
@@ -1,3 +1,6 @@
1uvcvideo-objs := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \ 1uvcvideo-objs := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \
2 uvc_status.o uvc_isight.o 2 uvc_status.o uvc_isight.o
3ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
4uvcvideo-objs += uvc_entity.o
5endif
3obj-$(CONFIG_USB_VIDEO_CLASS) += uvcvideo.o 6obj-$(CONFIG_USB_VIDEO_CLASS) += uvcvideo.o
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 823f4b389745..b6eae48d7fb8 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -248,7 +248,7 @@ uint32_t uvc_fraction_to_interval(uint32_t numerator, uint32_t denominator)
248 * Terminal and unit management 248 * Terminal and unit management
249 */ 249 */
250 250
251static struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id) 251struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
252{ 252{
253 struct uvc_entity *entity; 253 struct uvc_entity *entity;
254 254
@@ -795,9 +795,12 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
795 struct uvc_entity *entity; 795 struct uvc_entity *entity;
796 unsigned int num_inputs; 796 unsigned int num_inputs;
797 unsigned int size; 797 unsigned int size;
798 unsigned int i;
798 799
800 extra_size = ALIGN(extra_size, sizeof(*entity->pads));
799 num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1; 801 num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1;
800 size = sizeof(*entity) + extra_size + num_inputs; 802 size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads
803 + num_inputs;
801 entity = kzalloc(size, GFP_KERNEL); 804 entity = kzalloc(size, GFP_KERNEL);
802 if (entity == NULL) 805 if (entity == NULL)
803 return NULL; 806 return NULL;
@@ -805,8 +808,17 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
805 entity->id = id; 808 entity->id = id;
806 entity->type = type; 809 entity->type = type;
807 810
811 entity->num_links = 0;
812 entity->num_pads = num_pads;
813 entity->pads = ((void *)(entity + 1)) + extra_size;
814
815 for (i = 0; i < num_inputs; ++i)
816 entity->pads[i].flags = MEDIA_PAD_FL_SINK;
817 if (!UVC_ENTITY_IS_OTERM(entity))
818 entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE;
819
808 entity->bNrInPins = num_inputs; 820 entity->bNrInPins = num_inputs;
809 entity->baSourceID = ((__u8 *)entity) + sizeof(*entity) + extra_size; 821 entity->baSourceID = (__u8 *)(&entity->pads[num_pads]);
810 822
811 return entity; 823 return entity;
812} 824}
@@ -1585,6 +1597,13 @@ static void uvc_delete(struct uvc_device *dev)
1585 uvc_status_cleanup(dev); 1597 uvc_status_cleanup(dev);
1586 uvc_ctrl_cleanup_device(dev); 1598 uvc_ctrl_cleanup_device(dev);
1587 1599
1600 if (dev->vdev.dev)
1601 v4l2_device_unregister(&dev->vdev);
1602#ifdef CONFIG_MEDIA_CONTROLLER
1603 if (media_devnode_is_registered(&dev->mdev.devnode))
1604 media_device_unregister(&dev->mdev);
1605#endif
1606
1588 list_for_each_safe(p, n, &dev->chains) { 1607 list_for_each_safe(p, n, &dev->chains) {
1589 struct uvc_video_chain *chain; 1608 struct uvc_video_chain *chain;
1590 chain = list_entry(p, struct uvc_video_chain, list); 1609 chain = list_entry(p, struct uvc_video_chain, list);
@@ -1594,6 +1613,13 @@ static void uvc_delete(struct uvc_device *dev)
1594 list_for_each_safe(p, n, &dev->entities) { 1613 list_for_each_safe(p, n, &dev->entities) {
1595 struct uvc_entity *entity; 1614 struct uvc_entity *entity;
1596 entity = list_entry(p, struct uvc_entity, list); 1615 entity = list_entry(p, struct uvc_entity, list);
1616#ifdef CONFIG_MEDIA_CONTROLLER
1617 uvc_mc_cleanup_entity(entity);
1618#endif
1619 if (entity->vdev) {
1620 video_device_release(entity->vdev);
1621 entity->vdev = NULL;
1622 }
1597 kfree(entity); 1623 kfree(entity);
1598 } 1624 }
1599 1625
@@ -1616,8 +1642,6 @@ static void uvc_release(struct video_device *vdev)
1616 struct uvc_streaming *stream = video_get_drvdata(vdev); 1642 struct uvc_streaming *stream = video_get_drvdata(vdev);
1617 struct uvc_device *dev = stream->dev; 1643 struct uvc_device *dev = stream->dev;
1618 1644
1619 video_device_release(vdev);
1620
1621 /* Decrement the registered streams count and delete the device when it 1645 /* Decrement the registered streams count and delete the device when it
1622 * reaches zero. 1646 * reaches zero.
1623 */ 1647 */
@@ -1682,7 +1706,7 @@ static int uvc_register_video(struct uvc_device *dev,
1682 * unregistered before the reference is released, so we don't need to 1706 * unregistered before the reference is released, so we don't need to
1683 * get another one. 1707 * get another one.
1684 */ 1708 */
1685 vdev->parent = &dev->intf->dev; 1709 vdev->v4l2_dev = &dev->vdev;
1686 vdev->fops = &uvc_fops; 1710 vdev->fops = &uvc_fops;
1687 vdev->release = uvc_release; 1711 vdev->release = uvc_release;
1688 strlcpy(vdev->name, dev->name, sizeof vdev->name); 1712 strlcpy(vdev->name, dev->name, sizeof vdev->name);
@@ -1731,6 +1755,8 @@ static int uvc_register_terms(struct uvc_device *dev,
1731 ret = uvc_register_video(dev, stream); 1755 ret = uvc_register_video(dev, stream);
1732 if (ret < 0) 1756 if (ret < 0)
1733 return ret; 1757 return ret;
1758
1759 term->vdev = stream->vdev;
1734 } 1760 }
1735 1761
1736 return 0; 1762 return 0;
@@ -1745,6 +1771,14 @@ static int uvc_register_chains(struct uvc_device *dev)
1745 ret = uvc_register_terms(dev, chain); 1771 ret = uvc_register_terms(dev, chain);
1746 if (ret < 0) 1772 if (ret < 0)
1747 return ret; 1773 return ret;
1774
1775#ifdef CONFIG_MEDIA_CONTROLLER
1776 ret = uvc_mc_register_entities(chain);
1777 if (ret < 0) {
1778 uvc_printk(KERN_INFO, "Failed to register entities "
1779 "(%d).\n", ret);
1780 }
1781#endif
1748 } 1782 }
1749 1783
1750 return 0; 1784 return 0;
@@ -1814,6 +1848,24 @@ static int uvc_probe(struct usb_interface *intf,
1814 "linux-uvc-devel mailing list.\n"); 1848 "linux-uvc-devel mailing list.\n");
1815 } 1849 }
1816 1850
1851 /* Register the media and V4L2 devices. */
1852#ifdef CONFIG_MEDIA_CONTROLLER
1853 dev->mdev.dev = &intf->dev;
1854 strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
1855 if (udev->serial)
1856 strlcpy(dev->mdev.serial, udev->serial,
1857 sizeof(dev->mdev.serial));
1858 strcpy(dev->mdev.bus_info, udev->devpath);
1859 dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
1860 dev->mdev.driver_version = DRIVER_VERSION_NUMBER;
1861 if (media_device_register(&dev->mdev) < 0)
1862 goto error;
1863
1864 dev->vdev.mdev = &dev->mdev;
1865#endif
1866 if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
1867 goto error;
1868
1817 /* Initialize controls. */ 1869 /* Initialize controls. */
1818 if (uvc_ctrl_init_device(dev) < 0) 1870 if (uvc_ctrl_init_device(dev) < 0)
1819 goto error; 1871 goto error;
@@ -1822,7 +1874,7 @@ static int uvc_probe(struct usb_interface *intf,
1822 if (uvc_scan_device(dev) < 0) 1874 if (uvc_scan_device(dev) < 0)
1823 goto error; 1875 goto error;
1824 1876
1825 /* Register video devices. */ 1877 /* Register video device nodes. */
1826 if (uvc_register_chains(dev) < 0) 1878 if (uvc_register_chains(dev) < 0)
1827 goto error; 1879 goto error;
1828 1880
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
new file mode 100644
index 000000000000..ede7852bb1df
--- /dev/null
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -0,0 +1,118 @@
1/*
2 * uvc_entity.c -- USB Video Class driver
3 *
4 * Copyright (C) 2005-2011
5 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 */
13
14#include <linux/kernel.h>
15#include <linux/list.h>
16#include <linux/videodev2.h>
17
18#include <media/v4l2-common.h>
19
20#include "uvcvideo.h"
21
22/* ------------------------------------------------------------------------
23 * Video subdevices registration and unregistration
24 */
25
26static int uvc_mc_register_entity(struct uvc_video_chain *chain,
27 struct uvc_entity *entity)
28{
29 const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
30 struct uvc_entity *remote;
31 unsigned int i;
32 u8 remote_pad;
33	int ret = 0;
34
35 for (i = 0; i < entity->num_pads; ++i) {
36 struct media_entity *source;
37 struct media_entity *sink;
38
39 if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
40 continue;
41
42 remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
43 if (remote == NULL)
44 return -EINVAL;
45
46 source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
47 ? &remote->vdev->entity : &remote->subdev.entity;
48 sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
49 ? &entity->vdev->entity : &entity->subdev.entity;
50
51 remote_pad = remote->num_pads - 1;
52 ret = media_entity_create_link(source, remote_pad,
53 sink, i, flags);
54 if (ret < 0)
55 return ret;
56 }
57
58 if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
59 ret = v4l2_device_register_subdev(&chain->dev->vdev,
60 &entity->subdev);
61
62 return ret;
63}
64
65static struct v4l2_subdev_ops uvc_subdev_ops = {
66};
67
68void uvc_mc_cleanup_entity(struct uvc_entity *entity)
69{
70 if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
71 media_entity_cleanup(&entity->subdev.entity);
72 else if (entity->vdev != NULL)
73 media_entity_cleanup(&entity->vdev->entity);
74}
75
76static int uvc_mc_init_entity(struct uvc_entity *entity)
77{
78 int ret;
79
80 if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
81 v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
82 strlcpy(entity->subdev.name, entity->name,
83 sizeof(entity->subdev.name));
84
85 ret = media_entity_init(&entity->subdev.entity,
86 entity->num_pads, entity->pads, 0);
87 } else
88 ret = media_entity_init(&entity->vdev->entity,
89 entity->num_pads, entity->pads, 0);
90
91 return ret;
92}
93
94int uvc_mc_register_entities(struct uvc_video_chain *chain)
95{
96 struct uvc_entity *entity;
97 int ret;
98
99 list_for_each_entry(entity, &chain->entities, chain) {
100 ret = uvc_mc_init_entity(entity);
101 if (ret < 0) {
102 uvc_printk(KERN_INFO, "Failed to initialize entity for "
103 "entity %u\n", entity->id);
104 return ret;
105 }
106 }
107
108 list_for_each_entry(entity, &chain->entities, chain) {
109 ret = uvc_mc_register_entity(chain, entity);
110 if (ret < 0) {
111 uvc_printk(KERN_INFO, "Failed to register entity for "
112 "entity %u\n", entity->id);
113 return ret;
114 }
115 }
116
117 return 0;
118}
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 7cf224bae2e5..20107fd3574d 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -98,8 +98,11 @@ struct uvc_xu_control {
98#ifdef __KERNEL__ 98#ifdef __KERNEL__
99 99
100#include <linux/poll.h> 100#include <linux/poll.h>
101#include <linux/usb.h>
101#include <linux/usb/video.h> 102#include <linux/usb/video.h>
102#include <linux/uvcvideo.h> 103#include <linux/uvcvideo.h>
104#include <media/media-device.h>
105#include <media/v4l2-device.h>
103 106
104/* -------------------------------------------------------------------------- 107/* --------------------------------------------------------------------------
105 * UVC constants 108 * UVC constants
@@ -301,6 +304,13 @@ struct uvc_entity {
301 __u16 type; 304 __u16 type;
302 char name[64]; 305 char name[64];
303 306
307 /* Media controller-related fields. */
308 struct video_device *vdev;
309 struct v4l2_subdev subdev;
310 unsigned int num_pads;
311 unsigned int num_links;
312 struct media_pad *pads;
313
304 union { 314 union {
305 struct { 315 struct {
306 __u16 wObjectiveFocalLengthMin; 316 __u16 wObjectiveFocalLengthMin;
@@ -504,6 +514,10 @@ struct uvc_device {
504 atomic_t nmappings; 514 atomic_t nmappings;
505 515
506 /* Video control interface */ 516 /* Video control interface */
517#ifdef CONFIG_MEDIA_CONTROLLER
518 struct media_device mdev;
519#endif
520 struct v4l2_device vdev;
507 __u16 uvc_version; 521 __u16 uvc_version;
508 __u32 clock_frequency; 522 __u32 clock_frequency;
509 523
@@ -583,6 +597,8 @@ extern unsigned int uvc_timeout_param;
583/* Core driver */ 597/* Core driver */
584extern struct uvc_driver uvc_driver; 598extern struct uvc_driver uvc_driver;
585 599
600extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
601
586/* Video buffers queue management. */ 602/* Video buffers queue management. */
587extern void uvc_queue_init(struct uvc_video_queue *queue, 603extern void uvc_queue_init(struct uvc_video_queue *queue,
588 enum v4l2_buf_type type, int drop_corrupted); 604 enum v4l2_buf_type type, int drop_corrupted);
@@ -616,6 +632,10 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
616/* V4L2 interface */ 632/* V4L2 interface */
617extern const struct v4l2_file_operations uvc_fops; 633extern const struct v4l2_file_operations uvc_fops;
618 634
635/* Media controller */
636extern int uvc_mc_register_entities(struct uvc_video_chain *chain);
637extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
638
619/* Video */ 639/* Video */
620extern int uvc_video_init(struct uvc_streaming *stream); 640extern int uvc_video_init(struct uvc_streaming *stream);
621extern int uvc_video_suspend(struct uvc_streaming *stream); 641extern int uvc_video_suspend(struct uvc_streaming *stream);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 8344fc0ab858..b6c267724e14 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -719,6 +719,15 @@ config MFD_PM8XXX_IRQ
719 This is required to use certain other PM 8xxx features, such as GPIO 719 This is required to use certain other PM 8xxx features, such as GPIO
720 and MPP. 720 and MPP.
721 721
722config MFD_TPS65910
723 bool "TPS65910 Power Management chip"
724 depends on I2C=y
725 select MFD_CORE
726 select GPIO_TPS65910
727 help
728 If you say yes here you get support for the TPS65910 series of
729 Power Management chips.
730
722endif # MFD_SUPPORT 731endif # MFD_SUPPORT
723 732
724menu "Multimedia Capabilities Port drivers" 733menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 1acb8f29a96c..efe3cc33ed92 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -93,3 +93,4 @@ obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
93obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o 93obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
94obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o 94obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
95obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o 95obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
96obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
new file mode 100644
index 000000000000..2bfad5c86cc7
--- /dev/null
+++ b/drivers/mfd/tps65910-irq.c
@@ -0,0 +1,218 @@
1/*
2 * tps65910-irq.c -- TI TPS6591x
3 *
4 * Copyright 2010 Texas Instruments Inc.
5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/bug.h>
20#include <linux/device.h>
21#include <linux/interrupt.h>
22#include <linux/irq.h>
23#include <linux/gpio.h>
24#include <linux/mfd/tps65910.h>
25
26static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
27 int irq)
28{
29 return (irq - tps65910->irq_base);
30}
31
32/*
33 * This is a threaded IRQ handler so can access I2C/SPI. Since all
34 * interrupts are clear on read the IRQ line will be reasserted and
35 * the physical IRQ will be handled again if another interrupt is
36 * asserted while we run - in the normal course of events this is a
37 * rare occurrence so we save I2C/SPI reads. We're also assuming that
38 * it's rare to get lots of interrupts firing simultaneously so try to
39 * minimise I/O.
40 */
41static irqreturn_t tps65910_irq(int irq, void *irq_data)
42{
43 struct tps65910 *tps65910 = irq_data;
44 u32 irq_sts;
45 u32 irq_mask;
46 u8 reg;
47 int i;
48
49 tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg);
50 irq_sts = reg;
51 tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg);
52 irq_sts |= reg << 8;
53 switch (tps65910_chip_id(tps65910)) {
54 case TPS65911:
55 tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg);
56 irq_sts |= reg << 16;
57 }
58
59 tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
60 irq_mask = reg;
61 tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
62 irq_mask |= reg << 8;
63 switch (tps65910_chip_id(tps65910)) {
64 case TPS65911:
65 tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
66 irq_mask |= reg << 16;
67 }
68
69 irq_sts &= ~irq_mask;
70
71 if (!irq_sts)
72 return IRQ_NONE;
73
74 for (i = 0; i < tps65910->irq_num; i++) {
75
76 if (!(irq_sts & (1 << i)))
77 continue;
78
79 handle_nested_irq(tps65910->irq_base + i);
80 }
81
82 /* Write the STS register back to clear IRQs we handled */
83 reg = irq_sts & 0xFF;
84 irq_sts >>= 8;
85 tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg);
86 reg = irq_sts & 0xFF;
87 tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg);
88 switch (tps65910_chip_id(tps65910)) {
89 case TPS65911:
90 reg = irq_sts >> 8;
91 tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg);
92 }
93
94 return IRQ_HANDLED;
95}
96
97static void tps65910_irq_lock(struct irq_data *data)
98{
99 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
100
101 mutex_lock(&tps65910->irq_lock);
102}
103
104static void tps65910_irq_sync_unlock(struct irq_data *data)
105{
106 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
107 u32 reg_mask;
108 u8 reg;
109
110 tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
111 reg_mask = reg;
112 tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
113 reg_mask |= reg << 8;
114 switch (tps65910_chip_id(tps65910)) {
115 case TPS65911:
116 tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
117 reg_mask |= reg << 16;
118 }
119
120 if (tps65910->irq_mask != reg_mask) {
121 reg = tps65910->irq_mask & 0xFF;
122 tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg);
123 reg = tps65910->irq_mask >> 8 & 0xFF;
124 tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg);
125 switch (tps65910_chip_id(tps65910)) {
126 case TPS65911:
127 reg = tps65910->irq_mask >> 16;
128 tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg);
129 }
130 }
131 mutex_unlock(&tps65910->irq_lock);
132}
133
134static void tps65910_irq_enable(struct irq_data *data)
135{
136 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
137
138	tps65910->irq_mask &= ~(1 << irq_to_tps65910_irq(tps65910, data->irq));
139}
140
141static void tps65910_irq_disable(struct irq_data *data)
142{
143 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
144
145	tps65910->irq_mask |= (1 << irq_to_tps65910_irq(tps65910, data->irq));
146}
147
148static struct irq_chip tps65910_irq_chip = {
149 .name = "tps65910",
150 .irq_bus_lock = tps65910_irq_lock,
151 .irq_bus_sync_unlock = tps65910_irq_sync_unlock,
152 .irq_disable = tps65910_irq_disable,
153 .irq_enable = tps65910_irq_enable,
154};
155
156int tps65910_irq_init(struct tps65910 *tps65910, int irq,
157 struct tps65910_platform_data *pdata)
158{
159 int ret, cur_irq;
160 int flags = IRQF_ONESHOT;
161
162 if (!irq) {
163 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
164 return -EINVAL;
165 }
166
167 if (!pdata || !pdata->irq_base) {
168 dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n");
169 return -EINVAL;
170 }
171
172 tps65910->irq_mask = 0xFFFFFF;
173
174 mutex_init(&tps65910->irq_lock);
175 tps65910->chip_irq = irq;
176 tps65910->irq_base = pdata->irq_base;
177
178 switch (tps65910_chip_id(tps65910)) {
179 case TPS65910:
180		tps65910->irq_num = TPS65910_NUM_IRQ;
		break;
181 case TPS65911:
182 tps65910->irq_num = TPS65911_NUM_IRQ;
183 }
184
185 /* Register with genirq */
186 for (cur_irq = tps65910->irq_base;
187 cur_irq < tps65910->irq_num + tps65910->irq_base;
188 cur_irq++) {
189 irq_set_chip_data(cur_irq, tps65910);
190 irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip,
191 handle_edge_irq);
192 irq_set_nested_thread(cur_irq, 1);
193
194 /* ARM needs us to explicitly flag the IRQ as valid
195 * and will set them noprobe when we do so. */
196#ifdef CONFIG_ARM
197 set_irq_flags(cur_irq, IRQF_VALID);
198#else
199 irq_set_noprobe(cur_irq);
200#endif
201 }
202
203 ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
204 "tps65910", tps65910);
205
206 irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
207
208 if (ret != 0)
209 dev_err(tps65910->dev, "Failed to request IRQ: %d\n", ret);
210
211 return ret;
212}
213
214int tps65910_irq_exit(struct tps65910 *tps65910)
215{
216 free_irq(tps65910->chip_irq, tps65910);
217 return 0;
218}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
new file mode 100644
index 000000000000..2229e66d80db
--- /dev/null
+++ b/drivers/mfd/tps65910.c
@@ -0,0 +1,229 @@
1/*
2 * tps65910.c -- TI TPS6591x
3 *
4 * Copyright 2010 Texas Instruments Inc.
5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/init.h>
19#include <linux/slab.h>
20#include <linux/i2c.h>
21#include <linux/gpio.h>
22#include <linux/mfd/core.h>
23#include <linux/mfd/tps65910.h>
24
25static struct mfd_cell tps65910s[] = {
26 {
27 .name = "tps65910-pmic",
28 },
29 {
30 .name = "tps65910-rtc",
31 },
32 {
33 .name = "tps65910-power",
34 },
35};
36
37
38static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
39 int bytes, void *dest)
40{
41 struct i2c_client *i2c = tps65910->i2c_client;
42 struct i2c_msg xfer[2];
43 int ret;
44
45 /* Write register */
46 xfer[0].addr = i2c->addr;
47 xfer[0].flags = 0;
48 xfer[0].len = 1;
49 xfer[0].buf = &reg;
50
51 /* Read data */
52 xfer[1].addr = i2c->addr;
53 xfer[1].flags = I2C_M_RD;
54 xfer[1].len = bytes;
55 xfer[1].buf = dest;
56
57 ret = i2c_transfer(i2c->adapter, xfer, 2);
58 if (ret == 2)
59 ret = 0;
60 else if (ret >= 0)
61 ret = -EIO;
62
63 return ret;
64}
65
66static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
67 int bytes, void *src)
68{
69 struct i2c_client *i2c = tps65910->i2c_client;
70 /* we add 1 byte for the device register */
71 u8 msg[TPS65910_MAX_REGISTER + 1];
72 int ret;
73
74 if (bytes > TPS65910_MAX_REGISTER)
75 return -EINVAL;
76
77 msg[0] = reg;
78 memcpy(&msg[1], src, bytes);
79
80 ret = i2c_master_send(i2c, msg, bytes + 1);
81 if (ret < 0)
82 return ret;
83 if (ret != bytes + 1)
84 return -EIO;
85 return 0;
86}
87
88int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
89{
90 u8 data;
91 int err;
92
93 mutex_lock(&tps65910->io_mutex);
94 err = tps65910_i2c_read(tps65910, reg, 1, &data);
95 if (err) {
96 dev_err(tps65910->dev, "read from reg %x failed\n", reg);
97 goto out;
98 }
99
100 data |= mask;
101 err = tps65910_i2c_write(tps65910, reg, 1, &data);
102 if (err)
103 dev_err(tps65910->dev, "write to reg %x failed\n", reg);
104
105out:
106 mutex_unlock(&tps65910->io_mutex);
107 return err;
108}
109EXPORT_SYMBOL_GPL(tps65910_set_bits);
110
111int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
112{
113 u8 data;
114 int err;
115
116 mutex_lock(&tps65910->io_mutex);
117 err = tps65910_i2c_read(tps65910, reg, 1, &data);
118 if (err) {
119 dev_err(tps65910->dev, "read from reg %x failed\n", reg);
120 goto out;
121 }
122
123 data &= ~mask;
124 err = tps65910_i2c_write(tps65910, reg, 1, &data);
125 if (err)
126 dev_err(tps65910->dev, "write to reg %x failed\n", reg);
127
128out:
129 mutex_unlock(&tps65910->io_mutex);
130 return err;
131}
132EXPORT_SYMBOL_GPL(tps65910_clear_bits);
133
134static int tps65910_i2c_probe(struct i2c_client *i2c,
135 const struct i2c_device_id *id)
136{
137 struct tps65910 *tps65910;
138 struct tps65910_board *pmic_plat_data;
139 struct tps65910_platform_data *init_data;
140 int ret = 0;
141
142 pmic_plat_data = dev_get_platdata(&i2c->dev);
143 if (!pmic_plat_data)
144 return -EINVAL;
145
146 init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL);
147 if (init_data == NULL)
148 return -ENOMEM;
149
150 init_data->irq = pmic_plat_data->irq;
151 init_data->irq_base = pmic_plat_data->irq;
152
153 tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
154 if (tps65910 == NULL)
155 return -ENOMEM;
156
157 i2c_set_clientdata(i2c, tps65910);
158 tps65910->dev = &i2c->dev;
159 tps65910->i2c_client = i2c;
160 tps65910->id = id->driver_data;
161 tps65910->read = tps65910_i2c_read;
162 tps65910->write = tps65910_i2c_write;
163 mutex_init(&tps65910->io_mutex);
164
165 ret = mfd_add_devices(tps65910->dev, -1,
166 tps65910s, ARRAY_SIZE(tps65910s),
167 NULL, 0);
168 if (ret < 0)
169 goto err;
170
171 tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
172
173 ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
174 if (ret < 0)
175 goto err;
176
177 return ret;
178
179err:
180 mfd_remove_devices(tps65910->dev);
181 kfree(tps65910);
182 return ret;
183}
184
185static int tps65910_i2c_remove(struct i2c_client *i2c)
186{
187 struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
188
189 mfd_remove_devices(tps65910->dev);
190 kfree(tps65910);
191
192 return 0;
193}
194
195static const struct i2c_device_id tps65910_i2c_id[] = {
196 { "tps65910", TPS65910 },
197 { "tps65911", TPS65911 },
198 { }
199};
200MODULE_DEVICE_TABLE(i2c, tps65910_i2c_id);
201
202
203static struct i2c_driver tps65910_i2c_driver = {
204 .driver = {
205 .name = "tps65910",
206 .owner = THIS_MODULE,
207 },
208 .probe = tps65910_i2c_probe,
209 .remove = tps65910_i2c_remove,
210 .id_table = tps65910_i2c_id,
211};
212
213static int __init tps65910_i2c_init(void)
214{
215 return i2c_add_driver(&tps65910_i2c_driver);
216}
217/* init early so consumer devices can complete system boot */
218subsys_initcall(tps65910_i2c_init);
219
220static void __exit tps65910_i2c_exit(void)
221{
222 i2c_del_driver(&tps65910_i2c_driver);
223}
224module_exit(tps65910_i2c_exit);
225
226MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
227MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
228MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
229MODULE_LICENSE("GPL");
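
The probe above relies on board code passing a struct tps65910_board through the I2C device's platform_data. A minimal board-file sketch, not part of the patch; the bus number, slave address, IRQ line and GPIO base are illustrative values only:

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/mfd/tps65910.h>

static struct tps65910_board my_tps65910_pdata = {
	.irq		= 100,	/* board IRQ line wired to the PMIC (example) */
	.gpio_base	= 200,	/* base for the chip's GPIOs (example) */
};

static struct i2c_board_info my_pmic_board_info __initdata = {
	I2C_BOARD_INFO("tps65910", 0x2d),	/* example slave address */
	.platform_data	= &my_tps65910_pdata,
};

static void __init my_board_register_pmic(void)
{
	/* Bus number 1 is an example; use whatever bus the PMIC sits on. */
	i2c_register_board_info(1, &my_pmic_board_info, 1);
}
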
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
new file mode 100644
index 000000000000..3d2dc56a3d40
--- /dev/null
+++ b/drivers/mfd/tps65911-comparator.c
@@ -0,0 +1,188 @@
1/*
2 * tps65911-comparator.c -- TI TPS65911 comparator
3 *
4 * Copyright 2010 Texas Instruments Inc.
5 *
6 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/platform_device.h>
21#include <linux/debugfs.h>
22#include <linux/gpio.h>
23#include <linux/mfd/tps65910.h>
24
25#define COMP 0
26#define COMP1 1
27#define COMP2 2
28
29/* Comparator 1 voltage selection table in millivolts */
30static const u16 COMP_VSEL_TABLE[] = {
31 0, 2500, 2500, 2500, 2500, 2550, 2600, 2650,
32 2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050,
33 3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450,
34 3500,
35};
36
37struct comparator {
38 const char *name;
39 int reg;
40 int uV_max;
41 const u16 *vsel_table;
42};
43
44static struct comparator tps_comparators[] = {
45 {
46 .name = "COMP1",
47 .reg = TPS65911_VMBCH,
48 .uV_max = 3500,
49 .vsel_table = COMP_VSEL_TABLE,
50 },
51 {
52 .name = "COMP2",
53 .reg = TPS65911_VMBCH2,
54 .uV_max = 3500,
55 .vsel_table = COMP_VSEL_TABLE,
56 },
57};
58
59static int comp_threshold_set(struct tps65910 *tps65910, int id, int voltage)
60{
61 struct comparator tps_comp = tps_comparators[id];
62 int curr_voltage = 0;
63 int ret;
64 u8 index = 0, val;
65
66 if (id == COMP)
67 return 0;
68
69 while (curr_voltage < tps_comp.uV_max) {
70 curr_voltage = tps_comp.vsel_table[index];
71 if (curr_voltage >= voltage)
72 break;
73 else if (curr_voltage < voltage)
74 index++;
75 }
76
77 if (curr_voltage > tps_comp.uV_max)
78 return -EINVAL;
79
80 val = index << 1;
81 ret = tps65910->write(tps65910, tps_comp.reg, 1, &val);
82
83 return ret;
84}
85
86static int comp_threshold_get(struct tps65910 *tps65910, int id)
87{
88 struct comparator tps_comp = tps_comparators[id];
89 int ret;
90 u8 val;
91
92 if (id == COMP)
93 return 0;
94
95 ret = tps65910->read(tps65910, tps_comp.reg, 1, &val);
96 if (ret < 0)
97 return ret;
98
99 val >>= 1;
100 return tps_comp.vsel_table[val];
101}
102
103static ssize_t comp_threshold_show(struct device *dev,
104 struct device_attribute *attr, char *buf)
105{
106 struct tps65910 *tps65910 = dev_get_drvdata(dev->parent);
107 struct attribute comp_attr = attr->attr;
108 int id, uVolt;
109
110 if (!strcmp(comp_attr.name, "comp1_threshold"))
111 id = COMP1;
112 else if (!strcmp(comp_attr.name, "comp2_threshold"))
113 id = COMP2;
114 else
115 return -EINVAL;
116
117 uVolt = comp_threshold_get(tps65910, id);
118
119 return sprintf(buf, "%d\n", uVolt);
120}
121
122static DEVICE_ATTR(comp1_threshold, S_IRUGO, comp_threshold_show, NULL);
123static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
124
125static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
126{
127 struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
128 struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev);
129 int ret;
130
131 ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold);
132 if (ret < 0) {
133 dev_err(&pdev->dev, "cannot set COMP1 threshold\n");
134 return ret;
135 }
136
137 ret = comp_threshold_set(tps65910, COMP2, pdata->vmbch2_threshold);
138 if (ret < 0) {
139 dev_err(&pdev->dev, "cannot set COMP2 theshold\n");
140 return ret;
141 }
142
143 /* Create sysfs entry */
144 ret = device_create_file(&pdev->dev, &dev_attr_comp1_threshold);
145 if (ret < 0)
146 dev_err(&pdev->dev, "failed to add COMP1 sysfs file\n");
147
148 ret = device_create_file(&pdev->dev, &dev_attr_comp2_threshold);
149 if (ret < 0)
150 dev_err(&pdev->dev, "failed to add COMP2 sysfs file\n");
151
152 return ret;
153}
154
155static __devexit int tps65911_comparator_remove(struct platform_device *pdev)
156{
157 device_remove_file(&pdev->dev, &dev_attr_comp2_threshold);
158 device_remove_file(&pdev->dev, &dev_attr_comp1_threshold);
159
160
161 return 0;
162}
163
164static struct platform_driver tps65911_comparator_driver = {
165 .driver = {
166 .name = "tps65911-comparator",
167 .owner = THIS_MODULE,
168 },
169 .probe = tps65911_comparator_probe,
170 .remove = __devexit_p(tps65911_comparator_remove),
171};
172
173static int __init tps65911_comparator_init(void)
174{
175 return platform_driver_register(&tps65911_comparator_driver);
176}
177subsys_initcall(tps65911_comparator_init);
178
179static void __exit tps65911_comparator_exit(void)
180{
181 platform_driver_unregister(&tps65911_comparator_driver);
182}
183module_exit(tps65911_comparator_exit);
184
185MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
186MODULE_DESCRIPTION("TPS65911 comparator driver");
187MODULE_LICENSE("GPL v2");
188MODULE_ALIAS("platform:tps65911-comparator");
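
comp_threshold_set() above rounds the requested voltage up to the next entry in COMP_VSEL_TABLE and writes the table index shifted left by one. A small standalone sketch of that selection, for illustration only (plain userspace C, same table values):

#include <stdio.h>

/* Same table as the driver, in millivolts. */
static const int vsel[] = {
	0, 2500, 2500, 2500, 2500, 2550, 2600, 2650,
	2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050,
	3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450,
	3500,
};

int main(void)
{
	int request = 2725;	/* requested threshold in mV */
	int index = 0;

	/* Walk up the table until an entry covers the request. */
	while (vsel[index] < request && vsel[index] < 3500)
		index++;

	/* The driver writes index << 1 into the VMBCH register. */
	printf("request %d mV -> index %d (%d mV), register value 0x%02x\n",
	       request, index, vsel[index], index << 1);
	return 0;
}
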
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 74f16f167b8e..b0c56313dbbb 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -285,33 +285,26 @@ static void hw_break_val_write(void)
285static int check_and_rewind_pc(char *put_str, char *arg) 285static int check_and_rewind_pc(char *put_str, char *arg)
286{ 286{
287 unsigned long addr = lookup_addr(arg); 287 unsigned long addr = lookup_addr(arg);
288 unsigned long ip;
288 int offset = 0; 289 int offset = 0;
289 290
290 kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, 291 kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
291 NUMREGBYTES); 292 NUMREGBYTES);
292 gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); 293 gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
293 v2printk("Stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); 294 ip = instruction_pointer(&kgdbts_regs);
294#ifdef CONFIG_X86 295 v2printk("Stopped at IP: %lx\n", ip);
295 /* On x86 a breakpoint stop requires it to be decremented */ 296#ifdef GDB_ADJUSTS_BREAK_OFFSET
296 if (addr + 1 == kgdbts_regs.ip) 297 /* On some arches, a breakpoint stop requires it to be decremented */
297 offset = -1; 298 if (addr + BREAK_INSTR_SIZE == ip)
298#elif defined(CONFIG_SUPERH) 299 offset = -BREAK_INSTR_SIZE;
299 /* On SUPERH a breakpoint stop requires it to be decremented */
300 if (addr + 2 == kgdbts_regs.pc)
301 offset = -2;
302#endif 300#endif
303 if (strcmp(arg, "silent") && 301 if (strcmp(arg, "silent") && ip + offset != addr) {
304 instruction_pointer(&kgdbts_regs) + offset != addr) {
305 eprintk("kgdbts: BP mismatch %lx expected %lx\n", 302 eprintk("kgdbts: BP mismatch %lx expected %lx\n",
306 instruction_pointer(&kgdbts_regs) + offset, addr); 303 ip + offset, addr);
307 return 1; 304 return 1;
308 } 305 }
309#ifdef CONFIG_X86 306 /* Readjust the instruction pointer if needed */
310 /* On x86 adjust the instruction pointer if needed */ 307 instruction_pointer_set(&kgdbts_regs, ip + offset);
311 kgdbts_regs.ip += offset;
312#elif defined(CONFIG_SUPERH)
313 kgdbts_regs.pc += offset;
314#endif
315 return 0; 308 return 0;
316} 309}
317 310
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 4941e06fe2e1..5da5bea0f9f0 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -51,6 +51,7 @@ static unsigned int fmax = 515633;
51 * is asserted (likewise for RX) 51 * is asserted (likewise for RX)
52 * @sdio: variant supports SDIO 52 * @sdio: variant supports SDIO
53 * @st_clkdiv: true if using a ST-specific clock divider algorithm 53 * @st_clkdiv: true if using a ST-specific clock divider algorithm
54 * @blksz_datactrl16: true if the block size is at bits 16..30 of the datactrl register
54 */ 55 */
55struct variant_data { 56struct variant_data {
56 unsigned int clkreg; 57 unsigned int clkreg;
@@ -60,6 +61,7 @@ struct variant_data {
60 unsigned int fifohalfsize; 61 unsigned int fifohalfsize;
61 bool sdio; 62 bool sdio;
62 bool st_clkdiv; 63 bool st_clkdiv;
64 bool blksz_datactrl16;
63}; 65};
64 66
65static struct variant_data variant_arm = { 67static struct variant_data variant_arm = {
@@ -92,6 +94,17 @@ static struct variant_data variant_ux500 = {
92 .st_clkdiv = true, 94 .st_clkdiv = true,
93}; 95};
94 96
97static struct variant_data variant_ux500v2 = {
98 .fifosize = 30 * 4,
99 .fifohalfsize = 8 * 4,
100 .clkreg = MCI_CLK_ENABLE,
101 .clkreg_enable = MCI_ST_UX500_HWFCEN,
102 .datalength_bits = 24,
103 .sdio = true,
104 .st_clkdiv = true,
105 .blksz_datactrl16 = true,
106};
107
95/* 108/*
96 * This must be called with host->lock held 109 * This must be called with host->lock held
97 */ 110 */
@@ -465,7 +478,10 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
465 blksz_bits = ffs(data->blksz) - 1; 478 blksz_bits = ffs(data->blksz) - 1;
466 BUG_ON(1 << blksz_bits != data->blksz); 479 BUG_ON(1 << blksz_bits != data->blksz);
467 480
468 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 481 if (variant->blksz_datactrl16)
482 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
483 else
484 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
469 485
470 if (data->flags & MMC_DATA_READ) 486 if (data->flags & MMC_DATA_READ)
471 datactrl |= MCI_DPSM_DIRECTION; 487 datactrl |= MCI_DPSM_DIRECTION;
@@ -1311,9 +1327,14 @@ static struct amba_id mmci_ids[] = {
1311 }, 1327 },
1312 { 1328 {
1313 .id = 0x00480180, 1329 .id = 0x00480180,
1314 .mask = 0x00ffffff, 1330 .mask = 0xf0ffffff,
1315 .data = &variant_ux500, 1331 .data = &variant_ux500,
1316 }, 1332 },
1333 {
1334 .id = 0x10480180,
1335 .mask = 0xf0ffffff,
1336 .data = &variant_ux500v2,
1337 },
1317 { 0, 0 }, 1338 { 0, 0 },
1318}; 1339};
1319 1340
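
The blksz_datactrl16 flag chooses between the two block-size encodings in mmci_start_data() above. A standalone sketch of the resulting register values for a 512-byte block; MCI_DPSM_ENABLE is assumed to be bit 0 here purely for illustration (the real definition lives in mmci.h):

#include <stdio.h>

#define MCI_DPSM_ENABLE		(1 << 0)	/* assumed for illustration */

int main(void)
{
	unsigned int blksz = 512;
	unsigned int blksz_bits = __builtin_ffs(blksz) - 1;	/* 9 for 512 */
	unsigned int classic, ux500v2;

	/* Original variants: power-of-two exponent shifted into bits 4 and up. */
	classic = MCI_DPSM_ENABLE | (blksz_bits << 4);

	/* ux500v2: raw block size in bits 16..30, so non-power-of-two
	 * block lengths become possible. */
	ux500v2 = MCI_DPSM_ENABLE | (blksz << 16);

	printf("datactrl classic = 0x%08x, ux500v2 = 0x%08x\n",
	       classic, ux500v2);
	return 0;
}
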
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index bc50d5ea5534..4be8373d43e5 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -33,20 +33,6 @@ config MTD_TESTS
33 should normally be compiled as kernel modules. The modules perform 33 should normally be compiled as kernel modules. The modules perform
34 various checks and verifications when loaded. 34 various checks and verifications when loaded.
35 35
36config MTD_PARTITIONS
37 bool "MTD partitioning support"
38 help
39 If you have a device which needs to divide its flash chip(s) up
40 into multiple 'partitions', each of which appears to the user as
41 a separate MTD device, you require this option to be enabled. If
42 unsure, say 'Y'.
43
44 Note, however, that you don't need this option for the DiskOnChip
45 devices. Partitioning on NFTL 'devices' is a different - that's the
46 'normal' form of partitioning used on a block device.
47
48if MTD_PARTITIONS
49
50config MTD_REDBOOT_PARTS 36config MTD_REDBOOT_PARTS
51 tristate "RedBoot partition table parsing" 37 tristate "RedBoot partition table parsing"
52 ---help--- 38 ---help---
@@ -99,7 +85,7 @@ endif # MTD_REDBOOT_PARTS
99 85
100config MTD_CMDLINE_PARTS 86config MTD_CMDLINE_PARTS
101 bool "Command line partition table parsing" 87 bool "Command line partition table parsing"
102 depends on MTD_PARTITIONS = "y" && MTD = "y" 88 depends on MTD = "y"
103 ---help--- 89 ---help---
104 Allow generic configuration of the MTD partition tables via the kernel 90 Allow generic configuration of the MTD partition tables via the kernel
105 command line. Multiple flash resources are supported for hardware where 91 command line. Multiple flash resources are supported for hardware where
@@ -163,8 +149,6 @@ config MTD_AR7_PARTS
163 ---help--- 149 ---help---
164 TI AR7 partitioning support 150 TI AR7 partitioning support
165 151
166endif # MTD_PARTITIONS
167
168comment "User Modules And Translation Layers" 152comment "User Modules And Translation Layers"
169 153
170config MTD_CHAR 154config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index d578095fb255..39664c4229ff 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,8 +4,7 @@
4 4
5# Core functionality. 5# Core functionality.
6obj-$(CONFIG_MTD) += mtd.o 6obj-$(CONFIG_MTD) += mtd.o
7mtd-y := mtdcore.o mtdsuper.o mtdconcat.o 7mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
8mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
9mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o 8mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
10 9
11obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 10obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
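
With partition support now always built in, the device driver hunks later in this patch collapse the old add_mtd_device()/add_mtd_partitions() split into a single mtd_device_register() call, and del_mtd_device()/del_mtd_partitions() into mtd_device_unregister(). A minimal sketch of the new pattern from a driver's point of view; the partition layout is purely illustrative:

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Illustrative static layout; real drivers take this from platform data,
 * the command line parser or the device tree. */
static struct mtd_partition example_parts[] = {
	{
		.name	= "bootloader",
		.offset	= 0,
		.size	= 256 * 1024,
	}, {
		.name	= "rootfs",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static int example_register(struct mtd_info *mtd)
{
	/* With a table this behaves like the old add_mtd_partitions();
	 * mtd_device_register(mtd, NULL, 0) registers the bare device
	 * like add_mtd_device() used to. */
	return mtd_device_register(mtd, example_parts,
				   ARRAY_SIZE(example_parts));
}

static void example_unregister(struct mtd_info *mtd)
{
	mtd_device_unregister(mtd);
}
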
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 09cb7c8d93b4..e1e122f2f929 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -812,12 +812,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
812 break; 812 break;
813 813
814 if (time_after(jiffies, timeo)) { 814 if (time_after(jiffies, timeo)) {
815 /* Urgh. Resume and pretend we weren't here. */ 815 /* Urgh. Resume and pretend we weren't here.
816 map_write(map, CMD(0xd0), adr); 816 * Make sure we're in 'read status' mode if it had finished */
817 /* Make sure we're in 'read status' mode if it had finished */ 817 put_chip(map, chip, adr);
818 map_write(map, CMD(0x70), adr);
819 chip->state = FL_ERASING;
820 chip->oldstate = FL_READY;
821 printk(KERN_ERR "%s: Chip not ready after erase " 818 printk(KERN_ERR "%s: Chip not ready after erase "
822 "suspended: status = 0x%lx\n", map->name, status.x[0]); 819 "suspended: status = 0x%lx\n", map->name, status.x[0]);
823 return -EIO; 820 return -EIO;
@@ -997,7 +994,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
997 994
998 switch(chip->oldstate) { 995 switch(chip->oldstate) {
999 case FL_ERASING: 996 case FL_ERASING:
1000 chip->state = chip->oldstate;
1001 /* What if one interleaved chip has finished and the 997 /* What if one interleaved chip has finished and the
1002 other hasn't? The old code would leave the finished 998 other hasn't? The old code would leave the finished
1003 one in READY mode. That's bad, and caused -EROFS 999 one in READY mode. That's bad, and caused -EROFS
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 0b49266840b9..23175edd5634 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -462,13 +462,14 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
462 cfi_fixup_major_minor(cfi, extp); 462 cfi_fixup_major_minor(cfi, extp);
463 463
464 /* 464 /*
465 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4 465 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
466 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 466 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
467 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf 467 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
468 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf 468 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
469 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
469 */ 470 */
470 if (extp->MajorVersion != '1' || 471 if (extp->MajorVersion != '1' ||
471 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) { 472 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
472 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 473 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
473 "version %c.%c (%#02x/%#02x).\n", 474 "version %c.%c (%#02x/%#02x).\n",
474 extp->MajorVersion, extp->MinorVersion, 475 extp->MajorVersion, extp->MinorVersion,
@@ -710,9 +711,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
710 * there was an error (so leave the erase 711 * there was an error (so leave the erase
711 * routine to recover from it) or we trying to 712 * routine to recover from it) or we trying to
712 * use the erase-in-progress sector. */ 713 * use the erase-in-progress sector. */
713 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 714 put_chip(map, chip, adr);
714 chip->state = FL_ERASING;
715 chip->oldstate = FL_READY;
716 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 715 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
717 return -EIO; 716 return -EIO;
718 } 717 }
@@ -762,7 +761,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
762 761
763 switch(chip->oldstate) { 762 switch(chip->oldstate) {
764 case FL_ERASING: 763 case FL_ERASING:
765 chip->state = chip->oldstate;
766 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 764 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
767 chip->oldstate = FL_READY; 765 chip->oldstate = FL_READY;
768 chip->state = FL_ERASING; 766 chip->state = FL_ERASING;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index ed56ad3884fb..179814a95f3a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -296,6 +296,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
296 /* make sure we're in 'read status' mode */ 296 /* make sure we're in 'read status' mode */
297 map_write(map, CMD(0x70), cmd_addr); 297 map_write(map, CMD(0x70), cmd_addr);
298 chip->state = FL_ERASING; 298 chip->state = FL_ERASING;
299 wake_up(&chip->wq);
299 mutex_unlock(&chip->mutex); 300 mutex_unlock(&chip->mutex);
300 printk(KERN_ERR "Chip not ready after erase " 301 printk(KERN_ERR "Chip not ready after erase "
301 "suspended: status = 0x%lx\n", status.x[0]); 302 "suspended: status = 0x%lx\n", status.x[0]);
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 97183c8c9e33..b78f23169d4e 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -294,7 +294,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
294 dev->mtd.priv = dev; 294 dev->mtd.priv = dev;
295 dev->mtd.owner = THIS_MODULE; 295 dev->mtd.owner = THIS_MODULE;
296 296
297 if (add_mtd_device(&dev->mtd)) { 297 if (mtd_device_register(&dev->mtd, NULL, 0)) {
298 /* Device didn't get added, so free the entry */ 298 /* Device didn't get added, so free the entry */
299 goto devinit_err; 299 goto devinit_err;
300 } 300 }
@@ -465,7 +465,7 @@ static void __devexit block2mtd_exit(void)
465 list_for_each_safe(pos, next, &blkmtd_device_list) { 465 list_for_each_safe(pos, next, &blkmtd_device_list) {
466 struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list); 466 struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
467 block2mtd_sync(&dev->mtd); 467 block2mtd_sync(&dev->mtd);
468 del_mtd_device(&dev->mtd); 468 mtd_device_unregister(&dev->mtd);
469 INFO("mtd%d: [%s] removed", dev->mtd.index, 469 INFO("mtd%d: [%s] removed", dev->mtd.index,
470 dev->mtd.name + strlen("block2mtd: ")); 470 dev->mtd.name + strlen("block2mtd: "));
471 list_del(&dev->list); 471 list_del(&dev->list);
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 5bf5f460e132..f7fbf6025ef2 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -597,7 +597,7 @@ void DoC2k_init(struct mtd_info *mtd)
597 doc2klist = mtd; 597 doc2klist = mtd;
598 mtd->size = this->totlen; 598 mtd->size = this->totlen;
599 mtd->erasesize = this->erasesize; 599 mtd->erasesize = this->erasesize;
600 add_mtd_device(mtd); 600 mtd_device_register(mtd, NULL, 0);
601 return; 601 return;
602 } 602 }
603} 603}
@@ -1185,7 +1185,7 @@ static void __exit cleanup_doc2000(void)
1185 this = mtd->priv; 1185 this = mtd->priv;
1186 doc2klist = this->nextdoc; 1186 doc2klist = this->nextdoc;
1187 1187
1188 del_mtd_device(mtd); 1188 mtd_device_unregister(mtd);
1189 1189
1190 iounmap(this->virtadr); 1190 iounmap(this->virtadr);
1191 kfree(this->chips); 1191 kfree(this->chips);
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 0990f7803628..241192f05bc8 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -376,7 +376,7 @@ void DoCMil_init(struct mtd_info *mtd)
376 this->nextdoc = docmillist; 376 this->nextdoc = docmillist;
377 docmillist = mtd; 377 docmillist = mtd;
378 mtd->size = this->totlen; 378 mtd->size = this->totlen;
379 add_mtd_device(mtd); 379 mtd_device_register(mtd, NULL, 0);
380 return; 380 return;
381 } 381 }
382} 382}
@@ -826,7 +826,7 @@ static void __exit cleanup_doc2001(void)
826 this = mtd->priv; 826 this = mtd->priv;
827 docmillist = this->nextdoc; 827 docmillist = this->nextdoc;
828 828
829 del_mtd_device(mtd); 829 mtd_device_unregister(mtd);
830 830
831 iounmap(this->virtadr); 831 iounmap(this->virtadr);
832 kfree(this->chips); 832 kfree(this->chips);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 8b36fa77a195..09ae0adc3ad0 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -499,7 +499,7 @@ void DoCMilPlus_init(struct mtd_info *mtd)
499 docmilpluslist = mtd; 499 docmilpluslist = mtd;
500 mtd->size = this->totlen; 500 mtd->size = this->totlen;
501 mtd->erasesize = this->erasesize; 501 mtd->erasesize = this->erasesize;
502 add_mtd_device(mtd); 502 mtd_device_register(mtd, NULL, 0);
503 return; 503 return;
504 } 504 }
505} 505}
@@ -1091,7 +1091,7 @@ static void __exit cleanup_doc2001plus(void)
1091 this = mtd->priv; 1091 this = mtd->priv;
1092 docmilpluslist = this->nextdoc; 1092 docmilpluslist = this->nextdoc;
1093 1093
1094 del_mtd_device(mtd); 1094 mtd_device_unregister(mtd);
1095 1095
1096 iounmap(this->virtadr); 1096 iounmap(this->virtadr);
1097 kfree(this->chips); 1097 kfree(this->chips);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4b829f97d56c..772a0ff89e0f 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -684,9 +684,10 @@ static int __init lart_flash_init (void)
684#endif 684#endif
685 685
686#ifndef HAVE_PARTITIONS 686#ifndef HAVE_PARTITIONS
687 result = add_mtd_device (&mtd); 687 result = mtd_device_register(&mtd, NULL, 0);
688#else 688#else
689 result = add_mtd_partitions (&mtd,lart_partitions, ARRAY_SIZE(lart_partitions)); 689 result = mtd_device_register(&mtd, lart_partitions,
690 ARRAY_SIZE(lart_partitions));
690#endif 691#endif
691 692
692 return (result); 693 return (result);
@@ -695,9 +696,9 @@ static int __init lart_flash_init (void)
695static void __exit lart_flash_exit (void) 696static void __exit lart_flash_exit (void)
696{ 697{
697#ifndef HAVE_PARTITIONS 698#ifndef HAVE_PARTITIONS
698 del_mtd_device (&mtd); 699 mtd_device_unregister(&mtd);
699#else 700#else
700 del_mtd_partitions (&mtd); 701 mtd_device_unregister(&mtd);
701#endif 702#endif
702} 703}
703 704
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 3fb981d4bb51..35180e475c4c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -27,6 +27,7 @@
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/mod_devicetable.h> 28#include <linux/mod_devicetable.h>
29 29
30#include <linux/mtd/cfi.h>
30#include <linux/mtd/mtd.h> 31#include <linux/mtd/mtd.h>
31#include <linux/mtd/partitions.h> 32#include <linux/mtd/partitions.h>
32 33
@@ -55,6 +56,9 @@
55#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */ 56#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
56#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */ 57#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
57 58
59/* Used for Spansion flashes only. */
60#define OPCODE_BRWR 0x17 /* Bank register write */
61
58/* Status Register bits. */ 62/* Status Register bits. */
59#define SR_WIP 1 /* Write in progress */ 63#define SR_WIP 1 /* Write in progress */
60#define SR_WEL 2 /* Write enable latch */ 64#define SR_WEL 2 /* Write enable latch */
@@ -76,6 +80,8 @@
76#define FAST_READ_DUMMY_BYTE 0 80#define FAST_READ_DUMMY_BYTE 0
77#endif 81#endif
78 82
83#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
84
79/****************************************************************************/ 85/****************************************************************************/
80 86
81struct m25p { 87struct m25p {
@@ -158,11 +164,18 @@ static inline int write_disable(struct m25p *flash)
158/* 164/*
159 * Enable/disable 4-byte addressing mode. 165 * Enable/disable 4-byte addressing mode.
160 */ 166 */
161static inline int set_4byte(struct m25p *flash, int enable) 167static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
162{ 168{
163 u8 code = enable ? OPCODE_EN4B : OPCODE_EX4B; 169 switch (JEDEC_MFR(jedec_id)) {
164 170 case CFI_MFR_MACRONIX:
165 return spi_write_then_read(flash->spi, &code, 1, NULL, 0); 171 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
172 return spi_write(flash->spi, flash->command, 1);
173 default:
174 /* Spansion style */
175 flash->command[0] = OPCODE_BRWR;
176 flash->command[1] = enable << 7;
177 return spi_write(flash->spi, flash->command, 2);
178 }
166} 179}
167 180
168/* 181/*
@@ -668,6 +681,7 @@ static const struct spi_device_id m25p_ids[] = {
668 /* Macronix */ 681 /* Macronix */
669 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, 682 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
670 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, 683 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
684 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
671 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) }, 685 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
672 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) }, 686 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
673 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, 687 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
@@ -684,6 +698,10 @@ static const struct spi_device_id m25p_ids[] = {
684 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, 698 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
685 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) }, 699 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
686 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, 700 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
701 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
702 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
703 { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
704 { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
687 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, 705 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
688 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, 706 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
689 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) }, 707 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
@@ -729,7 +747,10 @@ static const struct spi_device_id m25p_ids[] = {
729 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, 747 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
730 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, 748 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
731 749
732 { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) }, 750 { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
751 { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
752 { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
753 { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
733 754
734 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ 755 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
735 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, 756 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
@@ -804,6 +825,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
804 struct m25p *flash; 825 struct m25p *flash;
805 struct flash_info *info; 826 struct flash_info *info;
806 unsigned i; 827 unsigned i;
828 struct mtd_partition *parts = NULL;
829 int nr_parts = 0;
807 830
808 /* Platform data helps sort out which chip type we have, as 831 /* Platform data helps sort out which chip type we have, as
809 * well as how this board partitions it. If we don't have 832 * well as how this board partitions it. If we don't have
@@ -868,9 +891,9 @@ static int __devinit m25p_probe(struct spi_device *spi)
868 * up with the software protection bits set 891 * up with the software protection bits set
869 */ 892 */
870 893
871 if (info->jedec_id >> 16 == 0x1f || 894 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
872 info->jedec_id >> 16 == 0x89 || 895 JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
873 info->jedec_id >> 16 == 0xbf) { 896 JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
874 write_enable(flash); 897 write_enable(flash);
875 write_sr(flash, 0); 898 write_sr(flash, 0);
876 } 899 }
@@ -888,7 +911,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
888 flash->mtd.read = m25p80_read; 911 flash->mtd.read = m25p80_read;
889 912
890 /* sst flash chips use AAI word program */ 913 /* sst flash chips use AAI word program */
891 if (info->jedec_id >> 16 == 0xbf) 914 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
892 flash->mtd.write = sst_write; 915 flash->mtd.write = sst_write;
893 else 916 else
894 flash->mtd.write = m25p80_write; 917 flash->mtd.write = m25p80_write;
@@ -914,7 +937,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
914 /* enable 4-byte addressing if the device exceeds 16MiB */ 937 /* enable 4-byte addressing if the device exceeds 16MiB */
915 if (flash->mtd.size > 0x1000000) { 938 if (flash->mtd.size > 0x1000000) {
916 flash->addr_width = 4; 939 flash->addr_width = 4;
917 set_4byte(flash, 1); 940 set_4byte(flash, info->jedec_id, 1);
918 } else 941 } else
919 flash->addr_width = 3; 942 flash->addr_width = 3;
920 } 943 }
@@ -945,48 +968,41 @@ static int __devinit m25p_probe(struct spi_device *spi)
945 /* partitions should match sector boundaries; and it may be good to 968 /* partitions should match sector boundaries; and it may be good to
946 * use readonly partitions for writeprotected sectors (BP2..BP0). 969 * use readonly partitions for writeprotected sectors (BP2..BP0).
947 */ 970 */
948 if (mtd_has_partitions()) { 971 if (mtd_has_cmdlinepart()) {
949 struct mtd_partition *parts = NULL; 972 static const char *part_probes[]
950 int nr_parts = 0; 973 = { "cmdlinepart", NULL, };
951
952 if (mtd_has_cmdlinepart()) {
953 static const char *part_probes[]
954 = { "cmdlinepart", NULL, };
955 974
956 nr_parts = parse_mtd_partitions(&flash->mtd, 975 nr_parts = parse_mtd_partitions(&flash->mtd,
957 part_probes, &parts, 0); 976 part_probes, &parts, 0);
958 } 977 }
959 978
960 if (nr_parts <= 0 && data && data->parts) { 979 if (nr_parts <= 0 && data && data->parts) {
961 parts = data->parts; 980 parts = data->parts;
962 nr_parts = data->nr_parts; 981 nr_parts = data->nr_parts;
963 } 982 }
964 983
965#ifdef CONFIG_MTD_OF_PARTS 984#ifdef CONFIG_MTD_OF_PARTS
966 if (nr_parts <= 0 && spi->dev.of_node) { 985 if (nr_parts <= 0 && spi->dev.of_node) {
967 nr_parts = of_mtd_parse_partitions(&spi->dev, 986 nr_parts = of_mtd_parse_partitions(&spi->dev,
968 spi->dev.of_node, &parts); 987 spi->dev.of_node, &parts);
969 } 988 }
970#endif 989#endif
971 990
972 if (nr_parts > 0) { 991 if (nr_parts > 0) {
973 for (i = 0; i < nr_parts; i++) { 992 for (i = 0; i < nr_parts; i++) {
974 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " 993 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
975 "{.name = %s, .offset = 0x%llx, " 994 "{.name = %s, .offset = 0x%llx, "
976 ".size = 0x%llx (%lldKiB) }\n", 995 ".size = 0x%llx (%lldKiB) }\n",
977 i, parts[i].name, 996 i, parts[i].name,
978 (long long)parts[i].offset, 997 (long long)parts[i].offset,
979 (long long)parts[i].size, 998 (long long)parts[i].size,
980 (long long)(parts[i].size >> 10)); 999 (long long)(parts[i].size >> 10));
981 }
982 flash->partitioned = 1;
983 return add_mtd_partitions(&flash->mtd, parts, nr_parts);
984 } 1000 }
985 } else if (data && data->nr_parts) 1001 flash->partitioned = 1;
986 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", 1002 }
987 data->nr_parts, data->name);
988 1003
989 return add_mtd_device(&flash->mtd) == 1 ? -ENODEV : 0; 1004 return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ?
1005 -ENODEV : 0;
990} 1006}
991 1007
992 1008
@@ -996,10 +1012,7 @@ static int __devexit m25p_remove(struct spi_device *spi)
996 int status; 1012 int status;
997 1013
998 /* Clean up MTD stuff. */ 1014 /* Clean up MTD stuff. */
999 if (mtd_has_partitions() && flash->partitioned) 1015 status = mtd_device_unregister(&flash->mtd);
1000 status = del_mtd_partitions(&flash->mtd);
1001 else
1002 status = del_mtd_device(&flash->mtd);
1003 if (status == 0) { 1016 if (status == 0) {
1004 kfree(flash->command); 1017 kfree(flash->command);
1005 kfree(flash); 1018 kfree(flash);
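
JEDEC_MFR() introduced above simply extracts the manufacturer byte from the JEDEC ID, which set_4byte() then keys on. A quick standalone illustration using the mx25l12805d entry (0xc22018) from the ID table:

#include <stdio.h>

#define JEDEC_MFR(_jedec_id)	((_jedec_id) >> 16)

int main(void)
{
	unsigned int jedec_id = 0xc22018;	/* mx25l12805d, from the table above */

	/* 0xc2 is the Macronix manufacturer code, so the driver would use
	 * the EN4B/EX4B opcodes; other vendors fall back to the
	 * Spansion-style bank register write (OPCODE_BRWR). */
	printf("manufacturer byte: 0x%02x\n", JEDEC_MFR(jedec_id));
	return 0;
}
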
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 6a9a24a80a6d..8423fb6d4f26 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -220,7 +220,7 @@ static int __init ms02nv_init_one(ulong addr)
220 mtd->writesize = 1; 220 mtd->writesize = 1;
221 221
222 ret = -EIO; 222 ret = -EIO;
223 if (add_mtd_device(mtd)) { 223 if (mtd_device_register(mtd, NULL, 0)) {
224 printk(KERN_ERR 224 printk(KERN_ERR
225 "ms02-nv: Unable to register MTD device, aborting!\n"); 225 "ms02-nv: Unable to register MTD device, aborting!\n");
226 goto err_out_csr_res; 226 goto err_out_csr_res;
@@ -262,7 +262,7 @@ static void __exit ms02nv_remove_one(void)
262 262
263 root_ms02nv_mtd = mp->next; 263 root_ms02nv_mtd = mp->next;
264 264
265 del_mtd_device(mtd); 265 mtd_device_unregister(mtd);
266 266
267 release_resource(mp->resource.csr); 267 release_resource(mp->resource.csr);
268 kfree(mp->resource.csr); 268 kfree(mp->resource.csr);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index c5015cc721d5..13749d458a31 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -637,6 +637,8 @@ add_dataflash_otp(struct spi_device *spi, char *name,
637 struct flash_platform_data *pdata = spi->dev.platform_data; 637 struct flash_platform_data *pdata = spi->dev.platform_data;
638 char *otp_tag = ""; 638 char *otp_tag = "";
639 int err = 0; 639 int err = 0;
640 struct mtd_partition *parts;
641 int nr_parts = 0;
640 642
641 priv = kzalloc(sizeof *priv, GFP_KERNEL); 643 priv = kzalloc(sizeof *priv, GFP_KERNEL);
642 if (!priv) 644 if (!priv)
@@ -675,33 +677,25 @@ add_dataflash_otp(struct spi_device *spi, char *name,
675 pagesize, otp_tag); 677 pagesize, otp_tag);
676 dev_set_drvdata(&spi->dev, priv); 678 dev_set_drvdata(&spi->dev, priv);
677 679
678 if (mtd_has_partitions()) { 680 if (mtd_has_cmdlinepart()) {
679 struct mtd_partition *parts; 681 static const char *part_probes[] = { "cmdlinepart", NULL, };
680 int nr_parts = 0;
681 682
682 if (mtd_has_cmdlinepart()) { 683 nr_parts = parse_mtd_partitions(device, part_probes, &parts,
683 static const char *part_probes[] 684 0);
684 = { "cmdlinepart", NULL, }; 685 }
685
686 nr_parts = parse_mtd_partitions(device,
687 part_probes, &parts, 0);
688 }
689 686
690 if (nr_parts <= 0 && pdata && pdata->parts) { 687 if (nr_parts <= 0 && pdata && pdata->parts) {
691 parts = pdata->parts; 688 parts = pdata->parts;
692 nr_parts = pdata->nr_parts; 689 nr_parts = pdata->nr_parts;
693 } 690 }
694 691
695 if (nr_parts > 0) { 692 if (nr_parts > 0) {
696 priv->partitioned = 1; 693 priv->partitioned = 1;
697 err = add_mtd_partitions(device, parts, nr_parts); 694 err = mtd_device_register(device, parts, nr_parts);
698 goto out; 695 goto out;
699 } 696 }
700 } else if (pdata && pdata->nr_parts)
701 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
702 pdata->nr_parts, device->name);
703 697
704 if (add_mtd_device(device) == 1) 698 if (mtd_device_register(device, NULL, 0) == 1)
705 err = -ENODEV; 699 err = -ENODEV;
706 700
707out: 701out:
@@ -939,10 +933,7 @@ static int __devexit dataflash_remove(struct spi_device *spi)
939 933
940 DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev)); 934 DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
941 935
942 if (mtd_has_partitions() && flash->partitioned) 936 status = mtd_device_unregister(&flash->mtd);
943 status = del_mtd_partitions(&flash->mtd);
944 else
945 status = del_mtd_device(&flash->mtd);
946 if (status == 0) { 937 if (status == 0) {
947 dev_set_drvdata(&spi->dev, NULL); 938 dev_set_drvdata(&spi->dev, NULL);
948 kfree(flash); 939 kfree(flash);
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 1483e18971ce..2562689ba6b4 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -104,7 +104,7 @@ static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
104static void __exit cleanup_mtdram(void) 104static void __exit cleanup_mtdram(void)
105{ 105{
106 if (mtd_info) { 106 if (mtd_info) {
107 del_mtd_device(mtd_info); 107 mtd_device_unregister(mtd_info);
108 vfree(mtd_info->priv); 108 vfree(mtd_info->priv);
109 kfree(mtd_info); 109 kfree(mtd_info);
110 } 110 }
@@ -133,9 +133,8 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
133 mtd->read = ram_read; 133 mtd->read = ram_read;
134 mtd->write = ram_write; 134 mtd->write = ram_write;
135 135
136 if (add_mtd_device(mtd)) { 136 if (mtd_device_register(mtd, NULL, 0))
137 return -EIO; 137 return -EIO;
138 }
139 138
140 return 0; 139 return 0;
141} 140}
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 8d28fa02a5a2..23423bd00b06 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -115,7 +115,7 @@ static void unregister_devices(void)
115 struct phram_mtd_list *this, *safe; 115 struct phram_mtd_list *this, *safe;
116 116
117 list_for_each_entry_safe(this, safe, &phram_list, list) { 117 list_for_each_entry_safe(this, safe, &phram_list, list) {
118 del_mtd_device(&this->mtd); 118 mtd_device_unregister(&this->mtd);
119 iounmap(this->mtd.priv); 119 iounmap(this->mtd.priv);
120 kfree(this->mtd.name); 120 kfree(this->mtd.name);
121 kfree(this); 121 kfree(this);
@@ -153,7 +153,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
153 new->mtd.writesize = 1; 153 new->mtd.writesize = 1;
154 154
155 ret = -EAGAIN; 155 ret = -EAGAIN;
156 if (add_mtd_device(&new->mtd)) { 156 if (mtd_device_register(&new->mtd, NULL, 0)) {
157 pr_err("Failed to register new device\n"); 157 pr_err("Failed to register new device\n");
158 goto out2; 158 goto out2;
159 } 159 }
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 41b8cdcc64cb..ecff765579dd 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -798,7 +798,7 @@ static int __init init_pmc551(void)
798 mtd->writesize = 1; 798 mtd->writesize = 1;
799 mtd->owner = THIS_MODULE; 799 mtd->owner = THIS_MODULE;
800 800
801 if (add_mtd_device(mtd)) { 801 if (mtd_device_register(mtd, NULL, 0)) {
802 printk(KERN_NOTICE "pmc551: Failed to register new device\n"); 802 printk(KERN_NOTICE "pmc551: Failed to register new device\n");
803 pci_iounmap(PCI_Device, priv->start); 803 pci_iounmap(PCI_Device, priv->start);
804 kfree(mtd->priv); 804 kfree(mtd->priv);
@@ -806,7 +806,7 @@ static int __init init_pmc551(void)
806 break; 806 break;
807 } 807 }
808 808
809 /* Keep a reference as the add_mtd_device worked */ 809 /* Keep a reference as the mtd_device_register worked */
810 pci_dev_get(PCI_Device); 810 pci_dev_get(PCI_Device);
811 811
812 printk(KERN_NOTICE "Registered pmc551 memory device.\n"); 812 printk(KERN_NOTICE "Registered pmc551 memory device.\n");
@@ -856,7 +856,7 @@ static void __exit cleanup_pmc551(void)
856 pci_dev_put(priv->dev); 856 pci_dev_put(priv->dev);
857 857
858 kfree(mtd->priv); 858 kfree(mtd->priv);
859 del_mtd_device(mtd); 859 mtd_device_unregister(mtd);
860 kfree(mtd); 860 kfree(mtd);
861 found++; 861 found++;
862 } 862 }
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 592016a0668f..e585263161b9 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -210,7 +210,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
210 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ; 210 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
211 (*curmtd)->mtdinfo->writesize = 1; 211 (*curmtd)->mtdinfo->writesize = 1;
212 212
213 if (add_mtd_device((*curmtd)->mtdinfo)) { 213 if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
214 E("slram: Failed to register new device\n"); 214 E("slram: Failed to register new device\n");
215 iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start); 215 iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
216 kfree((*curmtd)->mtdinfo->priv); 216 kfree((*curmtd)->mtdinfo->priv);
@@ -231,7 +231,7 @@ static void unregister_devices(void)
231 231
232 while (slram_mtdlist) { 232 while (slram_mtdlist) {
233 nextitem = slram_mtdlist->next; 233 nextitem = slram_mtdlist->next;
234 del_mtd_device(slram_mtdlist->mtdinfo); 234 mtd_device_unregister(slram_mtdlist->mtdinfo);
235 iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start); 235 iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
236 kfree(slram_mtdlist->mtdinfo->priv); 236 kfree(slram_mtdlist->mtdinfo->priv);
237 kfree(slram_mtdlist->mtdinfo); 237 kfree(slram_mtdlist->mtdinfo);
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index c163e619abc9..1e2c430aaad2 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -66,7 +66,7 @@ struct flash_info {
66 66
67#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd) 67#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
68 68
69static struct flash_info __initdata sst25l_flash_info[] = { 69static struct flash_info __devinitdata sst25l_flash_info[] = {
70 {"sst25lf020a", 0xbf43, 256, 1024, 4096}, 70 {"sst25lf020a", 0xbf43, 256, 1024, 4096},
71 {"sst25lf040a", 0xbf44, 256, 2048, 4096}, 71 {"sst25lf040a", 0xbf44, 256, 2048, 4096},
72}; 72};
@@ -381,6 +381,8 @@ static int __devinit sst25l_probe(struct spi_device *spi)
381 struct sst25l_flash *flash; 381 struct sst25l_flash *flash;
382 struct flash_platform_data *data; 382 struct flash_platform_data *data;
383 int ret, i; 383 int ret, i;
384 struct mtd_partition *parts = NULL;
385 int nr_parts = 0;
384 386
385 flash_info = sst25l_match_device(spi); 387 flash_info = sst25l_match_device(spi);
386 if (!flash_info) 388 if (!flash_info)
@@ -420,46 +422,37 @@ static int __devinit sst25l_probe(struct spi_device *spi)
420 flash->mtd.erasesize, flash->mtd.erasesize / 1024, 422 flash->mtd.erasesize, flash->mtd.erasesize / 1024,
421 flash->mtd.numeraseregions); 423 flash->mtd.numeraseregions);
422 424
423 if (mtd_has_partitions()) {
424 struct mtd_partition *parts = NULL;
425 int nr_parts = 0;
426 425
427 if (mtd_has_cmdlinepart()) { 426 if (mtd_has_cmdlinepart()) {
428 static const char *part_probes[] = 427 static const char *part_probes[] = {"cmdlinepart", NULL};
429 {"cmdlinepart", NULL};
430 428
431 nr_parts = parse_mtd_partitions(&flash->mtd, 429 nr_parts = parse_mtd_partitions(&flash->mtd,
432 part_probes, 430 part_probes,
433 &parts, 0); 431 &parts, 0);
434 } 432 }
435 433
436 if (nr_parts <= 0 && data && data->parts) { 434 if (nr_parts <= 0 && data && data->parts) {
437 parts = data->parts; 435 parts = data->parts;
438 nr_parts = data->nr_parts; 436 nr_parts = data->nr_parts;
439 } 437 }
440 438
441 if (nr_parts > 0) { 439 if (nr_parts > 0) {
442 for (i = 0; i < nr_parts; i++) { 440 for (i = 0; i < nr_parts; i++) {
443 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " 441 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
444 "{.name = %s, .offset = 0x%llx, " 442 "{.name = %s, .offset = 0x%llx, "
445 ".size = 0x%llx (%lldKiB) }\n", 443 ".size = 0x%llx (%lldKiB) }\n",
446 i, parts[i].name, 444 i, parts[i].name,
447 (long long)parts[i].offset, 445 (long long)parts[i].offset,
448 (long long)parts[i].size, 446 (long long)parts[i].size,
449 (long long)(parts[i].size >> 10)); 447 (long long)(parts[i].size >> 10));
450 }
451
452 flash->partitioned = 1;
453 return add_mtd_partitions(&flash->mtd,
454 parts, nr_parts);
455 } 448 }
456 449
457 } else if (data && data->nr_parts) { 450 flash->partitioned = 1;
458 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", 451 return mtd_device_register(&flash->mtd, parts,
459 data->nr_parts, data->name); 452 nr_parts);
460 } 453 }
461 454
462 ret = add_mtd_device(&flash->mtd); 455 ret = mtd_device_register(&flash->mtd, NULL, 0);
463 if (ret == 1) { 456 if (ret == 1) {
464 kfree(flash); 457 kfree(flash);
465 dev_set_drvdata(&spi->dev, NULL); 458 dev_set_drvdata(&spi->dev, NULL);
@@ -469,15 +462,12 @@ static int __devinit sst25l_probe(struct spi_device *spi)
469 return 0; 462 return 0;
470} 463}
471 464
472static int __exit sst25l_remove(struct spi_device *spi) 465static int __devexit sst25l_remove(struct spi_device *spi)
473{ 466{
474 struct sst25l_flash *flash = dev_get_drvdata(&spi->dev); 467 struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
475 int ret; 468 int ret;
476 469
477 if (mtd_has_partitions() && flash->partitioned) 470 ret = mtd_device_unregister(&flash->mtd);
478 ret = del_mtd_partitions(&flash->mtd);
479 else
480 ret = del_mtd_device(&flash->mtd);
481 if (ret == 0) 471 if (ret == 0)
482 kfree(flash); 472 kfree(flash);
483 return ret; 473 return ret;
@@ -490,7 +480,7 @@ static struct spi_driver sst25l_driver = {
490 .owner = THIS_MODULE, 480 .owner = THIS_MODULE,
491 }, 481 },
492 .probe = sst25l_probe, 482 .probe = sst25l_probe,
493 .remove = __exit_p(sst25l_remove), 483 .remove = __devexit_p(sst25l_remove),
494}; 484};
495 485
496static int __init sst25l_init(void) 486static int __init sst25l_init(void)
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 12679925b420..65655dd59e1f 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -313,12 +313,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
313 if (ret) { 313 if (ret) {
314 /* Oops. something got wrong. */ 314 /* Oops. something got wrong. */
315 /* Resume and pretend we weren't here. */ 315 /* Resume and pretend we weren't here. */
316 map_write(map, CMD(LPDDR_RESUME), 316 put_chip(map, chip);
317 map->pfow_base + PFOW_COMMAND_CODE);
318 map_write(map, CMD(LPDDR_START_EXECUTION),
319 map->pfow_base + PFOW_COMMAND_EXECUTE);
320 chip->state = FL_ERASING;
321 chip->oldstate = FL_READY;
322 printk(KERN_ERR "%s: suspend operation failed." 317 printk(KERN_ERR "%s: suspend operation failed."
323 "State may be wrong \n", map->name); 318 "State may be wrong \n", map->name);
324 return -EIO; 319 return -EIO;
@@ -383,7 +378,6 @@ static void put_chip(struct map_info *map, struct flchip *chip)
383 378
384 switch (chip->oldstate) { 379 switch (chip->oldstate) {
385 case FL_ERASING: 380 case FL_ERASING:
386 chip->state = chip->oldstate;
387 map_write(map, CMD(LPDDR_RESUME), 381 map_write(map, CMD(LPDDR_RESUME),
388 map->pfow_base + PFOW_COMMAND_CODE); 382 map->pfow_base + PFOW_COMMAND_CODE);
389 map_write(map, CMD(LPDDR_START_EXECUTION), 383 map_write(map, CMD(LPDDR_START_EXECUTION),
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5069111c81cc..c0c328c5b133 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -82,7 +82,6 @@ config MTD_PHYSMAP_OF
82config MTD_PMC_MSP_EVM 82config MTD_PMC_MSP_EVM
83 tristate "CFI Flash device mapped on PMC-Sierra MSP" 83 tristate "CFI Flash device mapped on PMC-Sierra MSP"
84 depends on PMC_MSP && MTD_CFI 84 depends on PMC_MSP && MTD_CFI
85 select MTD_PARTITIONS
86 help 85 help
87 This provides a 'mapping' driver which supports the way 86 This provides a 'mapping' driver which supports the way
88 in which user-programmable flash chips are connected on the 87 in which user-programmable flash chips are connected on the
@@ -122,7 +121,7 @@ config MTD_SC520CDP
122 121
123config MTD_NETSC520 122config MTD_NETSC520
124 tristate "CFI Flash device mapped on AMD NetSc520" 123 tristate "CFI Flash device mapped on AMD NetSc520"
125 depends on X86 && MTD_CFI && MTD_PARTITIONS 124 depends on X86 && MTD_CFI
126 help 125 help
127 This enables access routines for the flash chips on the AMD NetSc520 126 This enables access routines for the flash chips on the AMD NetSc520
128 demonstration board. If you have one of these boards and would like 127 demonstration board. If you have one of these boards and would like
@@ -131,7 +130,6 @@ config MTD_NETSC520
131config MTD_TS5500 130config MTD_TS5500
132 tristate "JEDEC Flash device mapped on Technologic Systems TS-5500" 131 tristate "JEDEC Flash device mapped on Technologic Systems TS-5500"
133 depends on X86 132 depends on X86
134 select MTD_PARTITIONS
135 select MTD_JEDECPROBE 133 select MTD_JEDECPROBE
136 select MTD_CFI_AMDSTD 134 select MTD_CFI_AMDSTD
137 help 135 help
@@ -149,7 +147,7 @@ config MTD_TS5500
149 147
150config MTD_SBC_GXX 148config MTD_SBC_GXX
151 tristate "CFI Flash device mapped on Arcom SBC-GXx boards" 149 tristate "CFI Flash device mapped on Arcom SBC-GXx boards"
152 depends on X86 && MTD_CFI_INTELEXT && MTD_PARTITIONS && MTD_COMPLEX_MAPPINGS 150 depends on X86 && MTD_CFI_INTELEXT && MTD_COMPLEX_MAPPINGS
153 help 151 help
154 This provides a driver for the on-board flash of Arcom Control 152 This provides a driver for the on-board flash of Arcom Control
155 Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX. 153 Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX.
@@ -161,7 +159,6 @@ config MTD_SBC_GXX
161config MTD_PXA2XX 159config MTD_PXA2XX
162 tristate "CFI Flash device mapped on Intel XScale PXA2xx based boards" 160 tristate "CFI Flash device mapped on Intel XScale PXA2xx based boards"
163 depends on (PXA25x || PXA27x) && MTD_CFI_INTELEXT 161 depends on (PXA25x || PXA27x) && MTD_CFI_INTELEXT
164 select MTD_PARTITIONS
165 help 162 help
166 This provides a driver for the NOR flash attached to a PXA2xx chip. 163 This provides a driver for the NOR flash attached to a PXA2xx chip.
167 164
@@ -185,7 +182,7 @@ config MTD_VMAX
185 182
186config MTD_SCx200_DOCFLASH 183config MTD_SCx200_DOCFLASH
187 tristate "Flash device mapped with DOCCS on NatSemi SCx200" 184 tristate "Flash device mapped with DOCCS on NatSemi SCx200"
188 depends on SCx200 && MTD_CFI && MTD_PARTITIONS 185 depends on SCx200 && MTD_CFI
189 help 186 help
190 Enable support for a flash chip mapped using the DOCCS signal on a 187 Enable support for a flash chip mapped using the DOCCS signal on a
191 National Semiconductor SCx200 processor. 188 National Semiconductor SCx200 processor.
@@ -247,7 +244,7 @@ config MTD_TSUNAMI
247 244
248config MTD_NETtel 245config MTD_NETtel
249 tristate "CFI flash device on SnapGear/SecureEdge" 246 tristate "CFI flash device on SnapGear/SecureEdge"
250 depends on X86 && MTD_PARTITIONS && MTD_JEDECPROBE 247 depends on X86 && MTD_JEDECPROBE
251 help 248 help
252 Support for flash chips on NETtel/SecureEdge/SnapGear boards. 249 Support for flash chips on NETtel/SecureEdge/SnapGear boards.
253 250
@@ -269,7 +266,7 @@ config MTD_LANTIQ
269 266
270config MTD_DILNETPC 267config MTD_DILNETPC
271 tristate "CFI Flash device mapped on DIL/Net PC" 268 tristate "CFI Flash device mapped on DIL/Net PC"
272 depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN 269 depends on X86 && MTD_CFI_INTELEXT && BROKEN
273 help 270 help
274 MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". 271 MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
275 For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm> 272 For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -355,7 +352,7 @@ config MTD_CDB89712
355 352
356config MTD_SA1100 353config MTD_SA1100
357 tristate "CFI Flash device mapped on StrongARM SA11x0" 354 tristate "CFI Flash device mapped on StrongARM SA11x0"
358 depends on MTD_CFI && ARCH_SA1100 && MTD_PARTITIONS 355 depends on MTD_CFI && ARCH_SA1100
359 help 356 help
360 This enables access to the flash chips on most platforms based on 357 This enables access to the flash chips on most platforms based on
361 the SA1100 and SA1110, including the Assabet and the Compaq iPAQ. 358 the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
@@ -389,7 +386,7 @@ config MTD_IXP2000
389 386
390config MTD_FORTUNET 387config MTD_FORTUNET
391 tristate "CFI Flash device mapped on the FortuNet board" 388 tristate "CFI Flash device mapped on the FortuNet board"
392 depends on MTD_CFI && MTD_PARTITIONS && SA1100_FORTUNET 389 depends on MTD_CFI && SA1100_FORTUNET
393 help 390 help
394 This enables access to the Flash on the FortuNet board. If you 391 This enables access to the Flash on the FortuNet board. If you
395 have such a board, say 'Y'. 392 have such a board, say 'Y'.
@@ -461,7 +458,6 @@ config MTD_PCMCIA_ANONYMOUS
461config MTD_BFIN_ASYNC 458config MTD_BFIN_ASYNC
462 tristate "Blackfin BF533-STAMP Flash Chip Support" 459 tristate "Blackfin BF533-STAMP Flash Chip Support"
463 depends on BFIN533_STAMP && MTD_CFI && MTD_COMPLEX_MAPPINGS 460 depends on BFIN533_STAMP && MTD_CFI && MTD_COMPLEX_MAPPINGS
464 select MTD_PARTITIONS
465 default y 461 default y
466 help 462 help
467 Map driver which allows for simultaneous utilization of 463 Map driver which allows for simultaneous utilization of
@@ -473,7 +469,6 @@ config MTD_GPIO_ADDR
473 tristate "GPIO-assisted Flash Chip Support" 469 tristate "GPIO-assisted Flash Chip Support"
474 depends on GENERIC_GPIO || GPIOLIB 470 depends on GENERIC_GPIO || GPIOLIB
475 depends on MTD_COMPLEX_MAPPINGS 471 depends on MTD_COMPLEX_MAPPINGS
476 select MTD_PARTITIONS
477 help 472 help
478 Map driver which allows flashes to be partially physically addressed 473 Map driver which allows flashes to be partially physically addressed
479 and assisted by GPIOs. 474 and assisted by GPIOs.
@@ -482,14 +477,13 @@ config MTD_GPIO_ADDR
482 477
483config MTD_UCLINUX 478config MTD_UCLINUX
484 bool "Generic uClinux RAM/ROM filesystem support" 479 bool "Generic uClinux RAM/ROM filesystem support"
485 depends on MTD_PARTITIONS && MTD_RAM=y && !MMU 480 depends on MTD_RAM=y && !MMU
486 help 481 help
487 Map driver to support image based filesystems for uClinux. 482 Map driver to support image based filesystems for uClinux.
488 483
489config MTD_WRSBC8260 484config MTD_WRSBC8260
490 tristate "Map driver for WindRiver PowerQUICC II MPC82xx board" 485 tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
491 depends on (SBC82xx || SBC8560) 486 depends on (SBC82xx || SBC8560)
492 select MTD_PARTITIONS
493 select MTD_MAP_BANK_WIDTH_4 487 select MTD_MAP_BANK_WIDTH_4
494 select MTD_MAP_BANK_WIDTH_1 488 select MTD_MAP_BANK_WIDTH_1
495 select MTD_CFI_I1 489 select MTD_CFI_I1
@@ -502,7 +496,6 @@ config MTD_WRSBC8260
502config MTD_DMV182 496config MTD_DMV182
503 tristate "Map driver for Dy-4 SVME/DMV-182 board." 497 tristate "Map driver for Dy-4 SVME/DMV-182 board."
504 depends on DMV182 498 depends on DMV182
505 select MTD_PARTITIONS
506 select MTD_MAP_BANK_WIDTH_32 499 select MTD_MAP_BANK_WIDTH_32
507 select MTD_CFI_I8 500 select MTD_CFI_I8
508 select MTD_CFI_AMDSTD 501 select MTD_CFI_AMDSTD
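
A note on the Kconfig hunks above: every "depends on MTD_PARTITIONS" and "select MTD_PARTITIONS" can go because partition support is no longer a separate option; registration now goes through a single core call that takes an optional partition table. A minimal sketch of the two call forms the driver conversions below converge on; the example_* names are illustrative and not symbols from this patch.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* A chip assumed to have been probed elsewhere, e.g. with do_map_probe(). */
static struct mtd_info *example_mtd;

static struct mtd_partition example_parts[] = {
	{ .name = "boot",   .offset = 0,       .size = 0x40000 },
	{ .name = "rootfs", .offset = 0x40000, .size = MTDPART_SIZ_FULL },
};

static int example_register_whole(void)
{
	/* No partition table: the bare chip is registered. */
	return mtd_device_register(example_mtd, NULL, 0);
}

static int example_register_partitioned(void)
{
	/* Same entry point, this time with a partition table. */
	return mtd_device_register(example_mtd, example_parts,
				   ARRAY_SIZE(example_parts));
}
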
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 92de7e3a49a5..e2875d6fe129 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -82,7 +82,7 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
82 if (map->rsrc.parent) { 82 if (map->rsrc.parent) {
83 release_resource(&map->rsrc); 83 release_resource(&map->rsrc);
84 } 84 }
85 del_mtd_device(map->mtd); 85 mtd_device_unregister(map->mtd);
86 map_destroy(map->mtd); 86 map_destroy(map->mtd);
87 list_del(&map->list); 87 list_del(&map->list);
88 kfree(map); 88 kfree(map);
@@ -262,7 +262,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
262 262
263 /* Now that the mtd devices is complete claim and export it */ 263 /* Now that the mtd devices is complete claim and export it */
264 map->mtd->owner = THIS_MODULE; 264 map->mtd->owner = THIS_MODULE;
265 if (add_mtd_device(map->mtd)) { 265 if (mtd_device_register(map->mtd, NULL, 0)) {
266 map_destroy(map->mtd); 266 map_destroy(map->mtd);
267 map->mtd = NULL; 267 map->mtd = NULL;
268 goto out; 268 goto out;
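
The amd76xrom hunks above show the simplest shape of the conversion: add_mtd_device() becomes mtd_device_register(mtd, NULL, 0), still checked for failure, and del_mtd_device() becomes mtd_device_unregister(). A sketch of that register-or-bail pattern under the same assumptions; the my_* helpers are illustrative, not part of the patch.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>

/* Export a freshly probed chip; on failure undo the map probe. */
static int my_export_mtd(struct mtd_info *mtd)
{
	mtd->owner = THIS_MODULE;
	if (mtd_device_register(mtd, NULL, 0)) {
		map_destroy(mtd);
		return -ENXIO;
	}
	return 0;
}

static void my_unexport_mtd(struct mtd_info *mtd)
{
	mtd_device_unregister(mtd);
	map_destroy(mtd);
}
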
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 53664188fc47..e5bfd0e093bb 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -88,7 +88,7 @@ map:
88 sram_mtd->owner = THIS_MODULE; 88 sram_mtd->owner = THIS_MODULE;
89 sram_mtd->erasesize = 16; 89 sram_mtd->erasesize = 16;
90 90
91 if (add_mtd_device(sram_mtd)) { 91 if (mtd_device_register(sram_mtd, NULL, 0)) {
92 printk("NV-RAM device addition failed\n"); 92 printk("NV-RAM device addition failed\n");
93 err = -ENOMEM; 93 err = -ENOMEM;
94 goto out_probe; 94 goto out_probe;
@@ -111,7 +111,7 @@ out:
111static void __exit cleanup_autcpu12_maps(void) 111static void __exit cleanup_autcpu12_maps(void)
112{ 112{
113 if (sram_mtd) { 113 if (sram_mtd) {
114 del_mtd_device(sram_mtd); 114 mtd_device_unregister(sram_mtd);
115 map_destroy(sram_mtd); 115 map_destroy(sram_mtd);
116 iounmap((void *)autcpu12_sram_map.virt); 116 iounmap((void *)autcpu12_sram_map.virt);
117 } 117 }
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index 1f3049590d9e..608967fe74c6 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -224,8 +224,8 @@ probe_ok:
224 goto err_probe; 224 goto err_probe;
225 } 225 }
226 226
227 return add_mtd_partitions(bcm963xx_mtd_info, parsed_parts, 227 return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
228 parsed_nr_parts); 228 parsed_nr_parts);
229 229
230err_probe: 230err_probe:
231 iounmap(bcm963xx_map.virt); 231 iounmap(bcm963xx_map.virt);
@@ -235,7 +235,7 @@ err_probe:
235static int bcm963xx_remove(struct platform_device *pdev) 235static int bcm963xx_remove(struct platform_device *pdev)
236{ 236{
237 if (bcm963xx_mtd_info) { 237 if (bcm963xx_mtd_info) {
238 del_mtd_partitions(bcm963xx_mtd_info); 238 mtd_device_unregister(bcm963xx_mtd_info);
239 map_destroy(bcm963xx_mtd_info); 239 map_destroy(bcm963xx_mtd_info);
240 } 240 }
241 241
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 85dd18193cf2..d4297a97e100 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -41,9 +41,7 @@ struct async_state {
41 uint32_t flash_ambctl0, flash_ambctl1; 41 uint32_t flash_ambctl0, flash_ambctl1;
42 uint32_t save_ambctl0, save_ambctl1; 42 uint32_t save_ambctl0, save_ambctl1;
43 unsigned long irq_flags; 43 unsigned long irq_flags;
44#ifdef CONFIG_MTD_PARTITIONS
45 struct mtd_partition *parts; 44 struct mtd_partition *parts;
46#endif
47}; 45};
48 46
49static void switch_to_flash(struct async_state *state) 47static void switch_to_flash(struct async_state *state)
@@ -124,9 +122,7 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
124 switch_back(state); 122 switch_back(state);
125} 123}
126 124
127#ifdef CONFIG_MTD_PARTITIONS
128static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; 125static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
129#endif
130 126
131static int __devinit bfin_flash_probe(struct platform_device *pdev) 127static int __devinit bfin_flash_probe(struct platform_device *pdev)
132{ 128{
@@ -169,22 +165,17 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
169 return -ENXIO; 165 return -ENXIO;
170 } 166 }
171 167
172#ifdef CONFIG_MTD_PARTITIONS
173 ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0); 168 ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
174 if (ret > 0) { 169 if (ret > 0) {
175 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n"); 170 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
176 add_mtd_partitions(state->mtd, pdata->parts, ret); 171 mtd_device_register(state->mtd, pdata->parts, ret);
177 state->parts = pdata->parts; 172 state->parts = pdata->parts;
178
179 } else if (pdata->nr_parts) { 173 } else if (pdata->nr_parts) {
180 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n"); 174 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
181 add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts); 175 mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts);
182 176 } else {
183 } else
184#endif
185 {
186 pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n"); 177 pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
187 add_mtd_device(state->mtd); 178 mtd_device_register(state->mtd, NULL, 0);
188 } 179 }
189 180
190 platform_set_drvdata(pdev, state); 181 platform_set_drvdata(pdev, state);
@@ -196,10 +187,8 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
196{ 187{
197 struct async_state *state = platform_get_drvdata(pdev); 188 struct async_state *state = platform_get_drvdata(pdev);
198 gpio_free(state->enet_flash_pin); 189 gpio_free(state->enet_flash_pin);
199#ifdef CONFIG_MTD_PARTITIONS 190 mtd_device_unregister(state->mtd);
200 del_mtd_partitions(state->mtd);
201 kfree(state->parts); 191 kfree(state->parts);
202#endif
203 map_destroy(state->mtd); 192 map_destroy(state->mtd);
204 kfree(state); 193 kfree(state);
205 return 0; 194 return 0;
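
The bfin-async-flash hunks above show the probe-time pattern most of the remaining conversions follow: the #ifdef CONFIG_MTD_PARTITIONS guards disappear, the partition sources are tried in order (command-line and RedBoot parsers, then board-supplied platform data, then nothing), and a single registration call covers all three outcomes. A sketch of that flow assuming physmap-style platform data; the helper name and probe list are illustrative.

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>

static const char *my_probe_types[] = { "cmdlinepart", "RedBoot", NULL };

static int my_register_flash(struct mtd_info *mtd,
			     struct physmap_flash_data *pdata)
{
	struct mtd_partition *parts = NULL;
	int nr_parts;

	nr_parts = parse_mtd_partitions(mtd, my_probe_types, &parts, 0);
	if (nr_parts > 0)		/* a parser found a table */
		return mtd_device_register(mtd, parts, nr_parts);
	if (pdata && pdata->nr_parts)	/* fall back to the board table */
		return mtd_device_register(mtd, pdata->parts,
					   pdata->nr_parts);
	/* No partition information at all: register the whole chip. */
	return mtd_device_register(mtd, NULL, 0);
}
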
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index 8d92d8db9a98..c29cbf87ea0c 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -75,7 +75,7 @@ static int __init init_cdb89712_flash (void)
75 75
76 flash_mtd->owner = THIS_MODULE; 76 flash_mtd->owner = THIS_MODULE;
77 77
78 if (add_mtd_device(flash_mtd)) { 78 if (mtd_device_register(flash_mtd, NULL, 0)) {
79 printk("FLASH device addition failed\n"); 79 printk("FLASH device addition failed\n");
80 err = -ENOMEM; 80 err = -ENOMEM;
81 goto out_probe; 81 goto out_probe;
@@ -141,7 +141,7 @@ static int __init init_cdb89712_sram (void)
141 sram_mtd->owner = THIS_MODULE; 141 sram_mtd->owner = THIS_MODULE;
142 sram_mtd->erasesize = 16; 142 sram_mtd->erasesize = 16;
143 143
144 if (add_mtd_device(sram_mtd)) { 144 if (mtd_device_register(sram_mtd, NULL, 0)) {
145 printk("SRAM device addition failed\n"); 145 printk("SRAM device addition failed\n");
146 err = -ENOMEM; 146 err = -ENOMEM;
147 goto out_probe; 147 goto out_probe;
@@ -209,7 +209,7 @@ static int __init init_cdb89712_bootrom (void)
209 bootrom_mtd->owner = THIS_MODULE; 209 bootrom_mtd->owner = THIS_MODULE;
210 bootrom_mtd->erasesize = 0x10000; 210 bootrom_mtd->erasesize = 0x10000;
211 211
212 if (add_mtd_device(bootrom_mtd)) { 212 if (mtd_device_register(bootrom_mtd, NULL, 0)) {
213 printk("BootROM device addition failed\n"); 213 printk("BootROM device addition failed\n");
214 err = -ENOMEM; 214 err = -ENOMEM;
215 goto out_probe; 215 goto out_probe;
@@ -249,21 +249,21 @@ static int __init init_cdb89712_maps(void)
249static void __exit cleanup_cdb89712_maps(void) 249static void __exit cleanup_cdb89712_maps(void)
250{ 250{
251 if (sram_mtd) { 251 if (sram_mtd) {
252 del_mtd_device(sram_mtd); 252 mtd_device_unregister(sram_mtd);
253 map_destroy(sram_mtd); 253 map_destroy(sram_mtd);
254 iounmap((void *)cdb89712_sram_map.virt); 254 iounmap((void *)cdb89712_sram_map.virt);
255 release_resource (&cdb89712_sram_resource); 255 release_resource (&cdb89712_sram_resource);
256 } 256 }
257 257
258 if (flash_mtd) { 258 if (flash_mtd) {
259 del_mtd_device(flash_mtd); 259 mtd_device_unregister(flash_mtd);
260 map_destroy(flash_mtd); 260 map_destroy(flash_mtd);
261 iounmap((void *)cdb89712_flash_map.virt); 261 iounmap((void *)cdb89712_flash_map.virt);
262 release_resource (&cdb89712_flash_resource); 262 release_resource (&cdb89712_flash_resource);
263 } 263 }
264 264
265 if (bootrom_mtd) { 265 if (bootrom_mtd) {
266 del_mtd_device(bootrom_mtd); 266 mtd_device_unregister(bootrom_mtd);
267 map_destroy(bootrom_mtd); 267 map_destroy(bootrom_mtd);
268 iounmap((void *)cdb89712_bootrom_map.virt); 268 iounmap((void *)cdb89712_bootrom_map.virt);
269 release_resource (&cdb89712_bootrom_resource); 269 release_resource (&cdb89712_bootrom_resource);
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index 23f551dc8ca8..06f9c9815720 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -224,7 +224,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
224{ 224{
225 int i; 225 int i;
226 226
227 del_mtd_partitions(mtd); 227 mtd_device_unregister(mtd);
228 228
229 if (mtd != clps[0].mtd) 229 if (mtd != clps[0].mtd)
230 mtd_concat_destroy(mtd); 230 mtd_concat_destroy(mtd);
@@ -292,11 +292,11 @@ static void __init clps_locate_partitions(struct mtd_info *mtd)
292 if (nr_parts == 0) { 292 if (nr_parts == 0) {
293 printk(KERN_NOTICE "clps flash: no partition info " 293 printk(KERN_NOTICE "clps flash: no partition info "
294 "available, registering whole flash\n"); 294 "available, registering whole flash\n");
295 add_mtd_device(mtd); 295 mtd_device_register(mtd, NULL, 0);
296 } else { 296 } else {
297 printk(KERN_NOTICE "clps flash: using %s partition " 297 printk(KERN_NOTICE "clps flash: using %s partition "
298 "definition\n", part_type); 298 "definition\n", part_type);
299 add_mtd_partitions(mtd, parsed_parts, nr_parts); 299 mtd_device_register(mtd, parsed_parts, nr_parts);
300 } 300 }
301 301
302 /* Always succeeds. */ 302 /* Always succeeds. */
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index f71343cd77cc..d16fc9d3b8cd 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -107,7 +107,7 @@ static int __init init_flagadm(void)
107 mymtd = do_map_probe("cfi_probe", &flagadm_map); 107 mymtd = do_map_probe("cfi_probe", &flagadm_map);
108 if (mymtd) { 108 if (mymtd) {
109 mymtd->owner = THIS_MODULE; 109 mymtd->owner = THIS_MODULE;
110 add_mtd_partitions(mymtd, flagadm_parts, PARTITION_COUNT); 110 mtd_device_register(mymtd, flagadm_parts, PARTITION_COUNT);
111 printk(KERN_NOTICE "FlagaDM flash device initialized\n"); 111 printk(KERN_NOTICE "FlagaDM flash device initialized\n");
112 return 0; 112 return 0;
113 } 113 }
@@ -119,7 +119,7 @@ static int __init init_flagadm(void)
119static void __exit cleanup_flagadm(void) 119static void __exit cleanup_flagadm(void)
120{ 120{
121 if (mymtd) { 121 if (mymtd) {
122 del_mtd_partitions(mymtd); 122 mtd_device_unregister(mymtd);
123 map_destroy(mymtd); 123 map_destroy(mymtd);
124 } 124 }
125 if (flagadm_map.virt) { 125 if (flagadm_map.virt) {
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 5fdb7b26cea3..3d0e762fa5f2 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -94,7 +94,7 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
94 if (map->rsrc.parent) 94 if (map->rsrc.parent)
95 release_resource(&map->rsrc); 95 release_resource(&map->rsrc);
96 96
97 del_mtd_device(map->mtd); 97 mtd_device_unregister(map->mtd);
98 map_destroy(map->mtd); 98 map_destroy(map->mtd);
99 list_del(&map->list); 99 list_del(&map->list);
100 kfree(map); 100 kfree(map);
@@ -291,7 +291,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
291 291
292 /* Now that the mtd devices is complete claim and export it */ 292 /* Now that the mtd devices is complete claim and export it */
293 map->mtd->owner = THIS_MODULE; 293 map->mtd->owner = THIS_MODULE;
294 if (add_mtd_device(map->mtd)) { 294 if (mtd_device_register(map->mtd, NULL, 0)) {
295 map_destroy(map->mtd); 295 map_destroy(map->mtd);
296 map->mtd = NULL; 296 map->mtd = NULL;
297 goto out; 297 goto out;
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index cfacfa6f45dd..85bdece6ab3f 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -93,7 +93,7 @@ static int __init init_dbox2_flash(void)
93 mymtd->owner = THIS_MODULE; 93 mymtd->owner = THIS_MODULE;
94 94
95 /* Create MTD devices for each partition. */ 95 /* Create MTD devices for each partition. */
96 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS); 96 mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
97 97
98 return 0; 98 return 0;
99 } 99 }
@@ -105,7 +105,7 @@ static int __init init_dbox2_flash(void)
105static void __exit cleanup_dbox2_flash(void) 105static void __exit cleanup_dbox2_flash(void)
106{ 106{
107 if (mymtd) { 107 if (mymtd) {
108 del_mtd_partitions(mymtd); 108 mtd_device_unregister(mymtd);
109 map_destroy(mymtd); 109 map_destroy(mymtd);
110 } 110 }
111 if (dbox2_flash_map.virt) { 111 if (dbox2_flash_map.virt) {
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index b3cb3a183809..7a9e1989c977 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -145,17 +145,13 @@ static struct map_info dc21285_map = {
145 145
146 146
147/* Partition stuff */ 147/* Partition stuff */
148#ifdef CONFIG_MTD_PARTITIONS
149static struct mtd_partition *dc21285_parts; 148static struct mtd_partition *dc21285_parts;
150static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 149static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
151#endif
152 150
153static int __init init_dc21285(void) 151static int __init init_dc21285(void)
154{ 152{
155 153
156#ifdef CONFIG_MTD_PARTITIONS
157 int nrparts; 154 int nrparts;
158#endif
159 155
160 /* Determine bankwidth */ 156 /* Determine bankwidth */
161 switch (*CSR_SA110_CNTL & (3<<14)) { 157 switch (*CSR_SA110_CNTL & (3<<14)) {
@@ -204,13 +200,8 @@ static int __init init_dc21285(void)
204 200
205 dc21285_mtd->owner = THIS_MODULE; 201 dc21285_mtd->owner = THIS_MODULE;
206 202
207#ifdef CONFIG_MTD_PARTITIONS
208 nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0); 203 nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
209 if (nrparts > 0) 204 mtd_device_register(dc21285_mtd, dc21285_parts, nrparts);
210 add_mtd_partitions(dc21285_mtd, dc21285_parts, nrparts);
211 else
212#endif
213 add_mtd_device(dc21285_mtd);
214 205
215 if(machine_is_ebsa285()) { 206 if(machine_is_ebsa285()) {
216 /* 207 /*
@@ -232,14 +223,9 @@ static int __init init_dc21285(void)
232 223
233static void __exit cleanup_dc21285(void) 224static void __exit cleanup_dc21285(void)
234{ 225{
235#ifdef CONFIG_MTD_PARTITIONS 226 mtd_device_unregister(dc21285_mtd);
236 if (dc21285_parts) { 227 if (dc21285_parts)
237 del_mtd_partitions(dc21285_mtd);
238 kfree(dc21285_parts); 228 kfree(dc21285_parts);
239 } else
240#endif
241 del_mtd_device(dc21285_mtd);
242
243 map_destroy(dc21285_mtd); 229 map_destroy(dc21285_mtd);
244 iounmap(dc21285_map.virt); 230 iounmap(dc21285_map.virt);
245} 231}
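
The dc21285 hunks above are representative of the teardown side: the exit path no longer has to remember whether partitions or a whole device were added, because mtd_device_unregister() handles either form, and the only conditional left is freeing a parser-allocated table. A cleanup sketch along those lines; my_mtd and my_parts are illustrative statics.

#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>

static struct mtd_info *my_mtd;
static struct mtd_partition *my_parts;	/* set only if a parser allocated one */

static void my_flash_cleanup(void)
{
	if (!my_mtd)
		return;
	mtd_device_unregister(my_mtd);	/* whichever form was registered */
	kfree(my_parts);		/* kfree(NULL) is a harmless no-op */
	map_destroy(my_mtd);
}
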
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index 0713e3a5a22c..3e393f0da823 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -450,7 +450,7 @@ static int __init init_dnpc(void)
450 partition_info[2].mtdp = &lowlvl_parts[1]; 450 partition_info[2].mtdp = &lowlvl_parts[1];
451 partition_info[3].mtdp = &lowlvl_parts[3]; 451 partition_info[3].mtdp = &lowlvl_parts[3];
452 452
453 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS); 453 mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
454 454
455 /* 455 /*
456 ** now create a virtual MTD device by concatenating the for partitions 456 ** now create a virtual MTD device by concatenating the for partitions
@@ -463,7 +463,8 @@ static int __init init_dnpc(void)
463 ** we do not supply mtd pointers in higlvl_partition_info, so 463 ** we do not supply mtd pointers in higlvl_partition_info, so
464 ** add_mtd_partitions() will register the devices. 464 ** add_mtd_partitions() will register the devices.
465 */ 465 */
466 add_mtd_partitions(merged_mtd, higlvl_partition_info, NUM_HIGHLVL_PARTITIONS); 466 mtd_device_register(merged_mtd, higlvl_partition_info,
467 NUM_HIGHLVL_PARTITIONS);
467 } 468 }
468 469
469 return 0; 470 return 0;
@@ -472,12 +473,12 @@ static int __init init_dnpc(void)
472static void __exit cleanup_dnpc(void) 473static void __exit cleanup_dnpc(void)
473{ 474{
474 if(merged_mtd) { 475 if(merged_mtd) {
475 del_mtd_partitions(merged_mtd); 476 mtd_device_unregister(merged_mtd);
476 mtd_concat_destroy(merged_mtd); 477 mtd_concat_destroy(merged_mtd);
477 } 478 }
478 479
479 if (mymtd) { 480 if (mymtd) {
480 del_mtd_partitions(mymtd); 481 mtd_device_unregister(mymtd);
481 map_destroy(mymtd); 482 map_destroy(mymtd);
482 } 483 }
483 if (dnpc_map.virt) { 484 if (dnpc_map.virt) {
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
index d171674eb2ed..6538ac675e00 100644
--- a/drivers/mtd/maps/dmv182.c
+++ b/drivers/mtd/maps/dmv182.c
@@ -120,7 +120,7 @@ static int __init init_svme182(void)
120 this_mtd->size >> 20, FLASH_BASE_ADDR); 120 this_mtd->size >> 20, FLASH_BASE_ADDR);
121 121
122 this_mtd->owner = THIS_MODULE; 122 this_mtd->owner = THIS_MODULE;
123 add_mtd_partitions(this_mtd, partitions, num_parts); 123 mtd_device_register(this_mtd, partitions, num_parts);
124 124
125 return 0; 125 return 0;
126} 126}
@@ -129,7 +129,7 @@ static void __exit cleanup_svme182(void)
129{ 129{
130 if (this_mtd) 130 if (this_mtd)
131 { 131 {
132 del_mtd_partitions(this_mtd); 132 mtd_device_unregister(this_mtd);
133 map_destroy(this_mtd); 133 map_destroy(this_mtd);
134 } 134 }
135 135
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index be9e90b44587..fe42a212bb3e 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -15,10 +15,7 @@
15#include <asm/io.h> 15#include <asm/io.h>
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h> 17#include <linux/mtd/map.h>
18
19#ifdef CONFIG_MTD_PARTITIONS
20#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
21#endif
22 19
23#define WINDOW_ADDR 0x00000000 /* physical properties of flash */ 20#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
24#define WINDOW_SIZE 0x01000000 21#define WINDOW_SIZE 0x01000000
@@ -40,8 +37,6 @@ struct map_info edb7312nor_map = {
40 .phys = WINDOW_ADDR, 37 .phys = WINDOW_ADDR,
41}; 38};
42 39
43#ifdef CONFIG_MTD_PARTITIONS
44
45/* 40/*
46 * MTD partitioning stuff 41 * MTD partitioning stuff
47 */ 42 */
@@ -66,8 +61,6 @@ static struct mtd_partition static_partitions[3] =
66 61
67static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 62static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
68 63
69#endif
70
71static int mtd_parts_nb = 0; 64static int mtd_parts_nb = 0;
72static struct mtd_partition *mtd_parts = 0; 65static struct mtd_partition *mtd_parts = 0;
73 66
@@ -96,27 +89,24 @@ static int __init init_edb7312nor(void)
96 if (mymtd) { 89 if (mymtd) {
97 mymtd->owner = THIS_MODULE; 90 mymtd->owner = THIS_MODULE;
98 91
99#ifdef CONFIG_MTD_PARTITIONS
100 mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID); 92 mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
101 if (mtd_parts_nb > 0) 93 if (mtd_parts_nb > 0)
102 part_type = "detected"; 94 part_type = "detected";
103 95
104 if (mtd_parts_nb == 0) 96 if (mtd_parts_nb == 0) {
105 {
106 mtd_parts = static_partitions; 97 mtd_parts = static_partitions;
107 mtd_parts_nb = ARRAY_SIZE(static_partitions); 98 mtd_parts_nb = ARRAY_SIZE(static_partitions);
108 part_type = "static"; 99 part_type = "static";
109 } 100 }
110#endif 101
111 add_mtd_device(mymtd);
112 if (mtd_parts_nb == 0) 102 if (mtd_parts_nb == 0)
113 printk(KERN_NOTICE MSG_PREFIX "no partition info available\n"); 103 printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
114 else 104 else
115 {
116 printk(KERN_NOTICE MSG_PREFIX 105 printk(KERN_NOTICE MSG_PREFIX
117 "using %s partition definition\n", part_type); 106 "using %s partition definition\n", part_type);
118 add_mtd_partitions(mymtd, mtd_parts, mtd_parts_nb); 107 /* Register the whole device first. */
119 } 108 mtd_device_register(mymtd, NULL, 0);
109 mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
120 return 0; 110 return 0;
121 } 111 }
122 112
@@ -127,7 +117,7 @@ static int __init init_edb7312nor(void)
127static void __exit cleanup_edb7312nor(void) 117static void __exit cleanup_edb7312nor(void)
128{ 118{
129 if (mymtd) { 119 if (mymtd) {
130 del_mtd_device(mymtd); 120 mtd_device_unregister(mymtd);
131 map_destroy(mymtd); 121 map_destroy(mymtd);
132 } 122 }
133 if (edb7312nor_map.virt) { 123 if (edb7312nor_map.virt) {
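
edb7312 above is a small variant worth noting: as the old code did when partition support was enabled, it keeps exposing both the whole chip and its partitions, so it now calls the helper twice, once with no table and once with whatever table was found (parsed or the static fallback). Roughly, under those assumptions:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* mymtd, mtd_parts and mtd_parts_nb stand in for the driver's variables. */
static int my_register_both(struct mtd_info *mymtd,
			    struct mtd_partition *mtd_parts, int mtd_parts_nb)
{
	int err;

	/* Register the whole device first... */
	err = mtd_device_register(mymtd, NULL, 0);
	if (err)
		return err;
	/* ...then the partitions on top of it. */
	return mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
}
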
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 4feb7507ab7c..08322b1c3e81 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -128,7 +128,7 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
128 list_for_each_entry_safe(map, scratch, &window->maps, list) { 128 list_for_each_entry_safe(map, scratch, &window->maps, list) {
129 if (map->rsrc.parent) 129 if (map->rsrc.parent)
130 release_resource(&map->rsrc); 130 release_resource(&map->rsrc);
131 del_mtd_device(map->mtd); 131 mtd_device_unregister(map->mtd);
132 map_destroy(map->mtd); 132 map_destroy(map->mtd);
133 list_del(&map->list); 133 list_del(&map->list);
134 kfree(map); 134 kfree(map);
@@ -352,7 +352,7 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
352 352
353 /* Now that the mtd devices is complete claim and export it */ 353 /* Now that the mtd devices is complete claim and export it */
354 map->mtd->owner = THIS_MODULE; 354 map->mtd->owner = THIS_MODULE;
355 if (add_mtd_device(map->mtd)) { 355 if (mtd_device_register(map->mtd, NULL, 0)) {
356 map_destroy(map->mtd); 356 map_destroy(map->mtd);
357 map->mtd = NULL; 357 map->mtd = NULL;
358 goto out; 358 goto out;
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index 1e43124d498b..956e2e4f30ea 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -243,8 +243,9 @@ static int __init init_fortunet(void)
243 &map_regions[ix].map_info); 243 &map_regions[ix].map_info);
244 } 244 }
245 map_regions[ix].mymtd->owner = THIS_MODULE; 245 map_regions[ix].mymtd->owner = THIS_MODULE;
246 add_mtd_partitions(map_regions[ix].mymtd, 246 mtd_device_register(map_regions[ix].mymtd,
247 map_regions[ix].parts,map_regions_parts[ix]); 247 map_regions[ix].parts,
248 map_regions_parts[ix]);
248 } 249 }
249 } 250 }
250 if(iy) 251 if(iy)
@@ -261,7 +262,7 @@ static void __exit cleanup_fortunet(void)
261 { 262 {
262 if( map_regions[ix].mymtd ) 263 if( map_regions[ix].mymtd )
263 { 264 {
264 del_mtd_partitions( map_regions[ix].mymtd ); 265 mtd_device_unregister(map_regions[ix].mymtd);
265 map_destroy( map_regions[ix].mymtd ); 266 map_destroy( map_regions[ix].mymtd );
266 } 267 }
267 iounmap((void *)map_regions[ix].map_info.virt); 268 iounmap((void *)map_regions[ix].map_info.virt);
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index af5707a80205..7568c5f8b8ae 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -155,9 +155,7 @@ static void gf_copy_to(struct map_info *map, unsigned long to, const void *from,
155 memcpy_toio(map->virt + (to % state->win_size), from, len); 155 memcpy_toio(map->virt + (to % state->win_size), from, len);
156} 156}
157 157
158#ifdef CONFIG_MTD_PARTITIONS
159static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; 158static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
160#endif
161 159
162/** 160/**
163 * gpio_flash_probe() - setup a mapping for a GPIO assisted flash 161 * gpio_flash_probe() - setup a mapping for a GPIO assisted flash
@@ -189,7 +187,7 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
189 */ 187 */
190static int __devinit gpio_flash_probe(struct platform_device *pdev) 188static int __devinit gpio_flash_probe(struct platform_device *pdev)
191{ 189{
192 int ret; 190 int nr_parts;
193 size_t i, arr_size; 191 size_t i, arr_size;
194 struct physmap_flash_data *pdata; 192 struct physmap_flash_data *pdata;
195 struct resource *memory; 193 struct resource *memory;
@@ -254,24 +252,21 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
254 return -ENXIO; 252 return -ENXIO;
255 } 253 }
256 254
257#ifdef CONFIG_MTD_PARTITIONS 255 nr_parts = parse_mtd_partitions(state->mtd, part_probe_types,
258 ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0); 256 &pdata->parts, 0);
259 if (ret > 0) { 257 if (nr_parts > 0) {
260 pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n"); 258 pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
261 add_mtd_partitions(state->mtd, pdata->parts, ret);
262 kfree(pdata->parts); 259 kfree(pdata->parts);
263
264 } else if (pdata->nr_parts) { 260 } else if (pdata->nr_parts) {
265 pr_devinit(KERN_NOTICE PFX "Using board partition definition\n"); 261 pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
266 add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts); 262 nr_parts = pdata->nr_parts;
267 263 } else {
268 } else
269#endif
270 {
271 pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n"); 264 pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
272 add_mtd_device(state->mtd); 265 nr_parts = 0;
273 } 266 }
274 267
268 mtd_device_register(state->mtd, pdata->parts, nr_parts);
269
275 return 0; 270 return 0;
276} 271}
277 272
@@ -282,9 +277,7 @@ static int __devexit gpio_flash_remove(struct platform_device *pdev)
282 do { 277 do {
283 gpio_free(state->gpio_addrs[i]); 278 gpio_free(state->gpio_addrs[i]);
284 } while (++i < state->gpio_count); 279 } while (++i < state->gpio_count);
285#ifdef CONFIG_MTD_PARTITIONS 280 mtd_device_unregister(state->mtd);
286 del_mtd_partitions(state->mtd);
287#endif
288 map_destroy(state->mtd); 281 map_destroy(state->mtd);
289 kfree(state); 282 kfree(state);
290 return 0; 283 return 0;
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 72c724fa8c27..7f035860a36b 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -92,18 +92,16 @@ static int __init h720x_mtd_init(void)
92 if (mymtd) { 92 if (mymtd) {
93 mymtd->owner = THIS_MODULE; 93 mymtd->owner = THIS_MODULE;
94 94
95#ifdef CONFIG_MTD_PARTITIONS
96 nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0); 95 nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
97 if (nr_mtd_parts > 0) 96 if (nr_mtd_parts > 0)
98 part_type = "command line"; 97 part_type = "command line";
99#endif
100 if (nr_mtd_parts <= 0) { 98 if (nr_mtd_parts <= 0) {
101 mtd_parts = h720x_partitions; 99 mtd_parts = h720x_partitions;
102 nr_mtd_parts = NUM_PARTITIONS; 100 nr_mtd_parts = NUM_PARTITIONS;
103 part_type = "builtin"; 101 part_type = "builtin";
104 } 102 }
105 printk(KERN_INFO "Using %s partition table\n", part_type); 103 printk(KERN_INFO "Using %s partition table\n", part_type);
106 add_mtd_partitions(mymtd, mtd_parts, nr_mtd_parts); 104 mtd_device_register(mymtd, mtd_parts, nr_mtd_parts);
107 return 0; 105 return 0;
108 } 106 }
109 107
@@ -118,7 +116,7 @@ static void __exit h720x_mtd_cleanup(void)
118{ 116{
119 117
120 if (mymtd) { 118 if (mymtd) {
121 del_mtd_partitions(mymtd); 119 mtd_device_unregister(mymtd);
122 map_destroy(mymtd); 120 map_destroy(mymtd);
123 } 121 }
124 122
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 1337a4191a0c..6689dcb3124d 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -67,7 +67,7 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
67 list_for_each_entry_safe(map, scratch, &window->maps, list) { 67 list_for_each_entry_safe(map, scratch, &window->maps, list) {
68 if (map->rsrc.parent) 68 if (map->rsrc.parent)
69 release_resource(&map->rsrc); 69 release_resource(&map->rsrc);
70 del_mtd_device(map->mtd); 70 mtd_device_unregister(map->mtd);
71 map_destroy(map->mtd); 71 map_destroy(map->mtd);
72 list_del(&map->list); 72 list_del(&map->list);
73 kfree(map); 73 kfree(map);
@@ -287,7 +287,7 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
287 287
288 /* Now that the mtd devices is complete claim and export it */ 288 /* Now that the mtd devices is complete claim and export it */
289 map->mtd->owner = THIS_MODULE; 289 map->mtd->owner = THIS_MODULE;
290 if (add_mtd_device(map->mtd)) { 290 if (mtd_device_register(map->mtd, NULL, 0)) {
291 map_destroy(map->mtd); 291 map_destroy(map->mtd);
292 map->mtd = NULL; 292 map->mtd = NULL;
293 goto out; 293 goto out;
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 998a27da97f3..404a50cbafa0 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -15,10 +15,7 @@
15#include <asm/io.h> 15#include <asm/io.h>
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h> 17#include <linux/mtd/map.h>
18
19#ifdef CONFIG_MTD_PARTITIONS
20#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
21#endif
22 19
23#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */ 20#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
24#define WINDOW_SIZE0 0x00800000 21#define WINDOW_SIZE0 0x00800000
@@ -49,8 +46,6 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
49 }, 46 },
50}; 47};
51 48
52#ifdef CONFIG_MTD_PARTITIONS
53
54/* 49/*
55 * MTD partitioning stuff 50 * MTD partitioning stuff
56 */ 51 */
@@ -66,8 +61,6 @@ static struct mtd_partition static_partitions[] =
66static int mtd_parts_nb[NUM_FLASHBANKS]; 61static int mtd_parts_nb[NUM_FLASHBANKS];
67static struct mtd_partition *mtd_parts[NUM_FLASHBANKS]; 62static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
68 63
69#endif
70
71static const char *probes[] = { "cmdlinepart", NULL }; 64static const char *probes[] = { "cmdlinepart", NULL };
72 65
73static int __init init_impa7(void) 66static int __init init_impa7(void)
@@ -104,7 +97,6 @@ static int __init init_impa7(void)
104 if (impa7_mtd[i]) { 97 if (impa7_mtd[i]) {
105 impa7_mtd[i]->owner = THIS_MODULE; 98 impa7_mtd[i]->owner = THIS_MODULE;
106 devicesfound++; 99 devicesfound++;
107#ifdef CONFIG_MTD_PARTITIONS
108 mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i], 100 mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
109 probes, 101 probes,
110 &mtd_parts[i], 102 &mtd_parts[i],
@@ -120,12 +112,8 @@ static int __init init_impa7(void)
120 printk(KERN_NOTICE MSG_PREFIX 112 printk(KERN_NOTICE MSG_PREFIX
121 "using %s partition definition\n", 113 "using %s partition definition\n",
122 part_type); 114 part_type);
123 add_mtd_partitions(impa7_mtd[i], 115 mtd_device_register(impa7_mtd[i],
124 mtd_parts[i], mtd_parts_nb[i]); 116 mtd_parts[i], mtd_parts_nb[i]);
125#else
126 add_mtd_device(impa7_mtd[i]);
127
128#endif
129 } 117 }
130 else 118 else
131 iounmap((void *)impa7_map[i].virt); 119 iounmap((void *)impa7_map[i].virt);
@@ -138,11 +126,7 @@ static void __exit cleanup_impa7(void)
138 int i; 126 int i;
139 for (i=0; i<NUM_FLASHBANKS; i++) { 127 for (i=0; i<NUM_FLASHBANKS; i++) {
140 if (impa7_mtd[i]) { 128 if (impa7_mtd[i]) {
141#ifdef CONFIG_MTD_PARTITIONS 129 mtd_device_unregister(impa7_mtd[i]);
142 del_mtd_partitions(impa7_mtd[i]);
143#else
144 del_mtd_device(impa7_mtd[i]);
145#endif
146 map_destroy(impa7_mtd[i]); 130 map_destroy(impa7_mtd[i]);
147 iounmap((void *)impa7_map[i].virt); 131 iounmap((void *)impa7_map[i].virt);
148 impa7_map[i].virt = 0; 132 impa7_map[i].virt = 0;
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index fc1998512eb4..d2f47be8754b 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -66,33 +66,18 @@ struct vr_nor_mtd {
66 66
67static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p) 67static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
68{ 68{
69 if (p->nr_parts > 0) { 69 mtd_device_unregister(p->info);
70#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
71 del_mtd_partitions(p->info);
72#endif
73 } else
74 del_mtd_device(p->info);
75} 70}
76 71
77static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p) 72static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
78{ 73{
79 int err = 0;
80#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
81 struct mtd_partition *parts; 74 struct mtd_partition *parts;
82 static const char *part_probes[] = { "cmdlinepart", NULL }; 75 static const char *part_probes[] = { "cmdlinepart", NULL };
83#endif
84 76
85 /* register the flash bank */ 77 /* register the flash bank */
86#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
87 /* partition the flash bank */ 78 /* partition the flash bank */
88 p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0); 79 p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
89 if (p->nr_parts > 0) 80 return mtd_device_register(p->info, parts, p->nr_parts);
90 err = add_mtd_partitions(p->info, parts, p->nr_parts);
91#endif
92 if (p->nr_parts <= 0)
93 err = add_mtd_device(p->info);
94
95 return err;
96} 81}
97 82
98static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) 83static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 9639d83a9d6c..c00b9175ba9e 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -119,7 +119,7 @@ static int ixp2000_flash_remove(struct platform_device *dev)
119 return 0; 119 return 0;
120 120
121 if (info->mtd) { 121 if (info->mtd) {
122 del_mtd_partitions(info->mtd); 122 mtd_device_unregister(info->mtd);
123 map_destroy(info->mtd); 123 map_destroy(info->mtd);
124 } 124 }
125 if (info->map.map_priv_1) 125 if (info->map.map_priv_1)
@@ -230,7 +230,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
230 230
231 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0); 231 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
232 if (err > 0) { 232 if (err > 0) {
233 err = add_mtd_partitions(info->mtd, info->partitions, err); 233 err = mtd_device_register(info->mtd, info->partitions, err);
234 if(err) 234 if(err)
235 dev_err(&dev->dev, "Could not parse partitions\n"); 235 dev_err(&dev->dev, "Could not parse partitions\n");
236 } 236 }
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 1f9fde0dad35..155b21942f47 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -162,7 +162,7 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
162 return 0; 162 return 0;
163 163
164 if (info->mtd) { 164 if (info->mtd) {
165 del_mtd_partitions(info->mtd); 165 mtd_device_unregister(info->mtd);
166 map_destroy(info->mtd); 166 map_destroy(info->mtd);
167 } 167 }
168 if (info->map.virt) 168 if (info->map.virt)
@@ -252,10 +252,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
252 /* Use the fast version */ 252 /* Use the fast version */
253 info->map.write = ixp4xx_write16; 253 info->map.write = ixp4xx_write16;
254 254
255#ifdef CONFIG_MTD_PARTITIONS
256 nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions, 255 nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions,
257 dev->resource->start); 256 dev->resource->start);
258#endif
259 if (nr_parts > 0) { 257 if (nr_parts > 0) {
260 part_type = "dynamic"; 258 part_type = "dynamic";
261 } else { 259 } else {
@@ -263,18 +261,16 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
263 nr_parts = plat->nr_parts; 261 nr_parts = plat->nr_parts;
264 part_type = "static"; 262 part_type = "static";
265 } 263 }
266 if (nr_parts == 0) { 264 if (nr_parts == 0)
267 printk(KERN_NOTICE "IXP4xx flash: no partition info " 265 printk(KERN_NOTICE "IXP4xx flash: no partition info "
268 "available, registering whole flash\n"); 266 "available, registering whole flash\n");
269 err = add_mtd_device(info->mtd); 267 else
270 } else {
271 printk(KERN_NOTICE "IXP4xx flash: using %s partition " 268 printk(KERN_NOTICE "IXP4xx flash: using %s partition "
272 "definition\n", part_type); 269 "definition\n", part_type);
273 err = add_mtd_partitions(info->mtd, info->partitions, nr_parts);
274 270
275 if(err) 271 err = mtd_device_register(info->mtd, info->partitions, nr_parts);
276 printk(KERN_ERR "Could not parse partitions\n"); 272 if (err)
277 } 273 printk(KERN_ERR "Could not parse partitions\n");
278 274
279 if (err) 275 if (err)
280 goto Error; 276 goto Error;
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 9e054503c4cf..dd0360ba2412 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -138,7 +138,7 @@ static int __init init_l440gx(void)
138 if (mymtd) { 138 if (mymtd) {
139 mymtd->owner = THIS_MODULE; 139 mymtd->owner = THIS_MODULE;
140 140
141 add_mtd_device(mymtd); 141 mtd_device_register(mymtd, NULL, 0);
142 return 0; 142 return 0;
143 } 143 }
144 144
@@ -148,7 +148,7 @@ static int __init init_l440gx(void)
148 148
149static void __exit cleanup_l440gx(void) 149static void __exit cleanup_l440gx(void)
150{ 150{
151 del_mtd_device(mymtd); 151 mtd_device_unregister(mymtd);
152 map_destroy(mymtd); 152 map_destroy(mymtd);
153 153
154 iounmap(l440gx_map.virt); 154 iounmap(l440gx_map.virt);
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index ee2548085334..5936c466e901 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -112,18 +112,9 @@ static int latch_addr_flash_remove(struct platform_device *dev)
112 latch_addr_data = dev->dev.platform_data; 112 latch_addr_data = dev->dev.platform_data;
113 113
114 if (info->mtd != NULL) { 114 if (info->mtd != NULL) {
115 if (mtd_has_partitions()) { 115 if (info->nr_parts)
116 if (info->nr_parts) { 116 kfree(info->parts);
117 del_mtd_partitions(info->mtd); 117 mtd_device_unregister(info->mtd);
118 kfree(info->parts);
119 } else if (latch_addr_data->nr_parts) {
120 del_mtd_partitions(info->mtd);
121 } else {
122 del_mtd_device(info->mtd);
123 }
124 } else {
125 del_mtd_device(info->mtd);
126 }
127 map_destroy(info->mtd); 118 map_destroy(info->mtd);
128 } 119 }
129 120
@@ -215,23 +206,21 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
215 } 206 }
216 info->mtd->owner = THIS_MODULE; 207 info->mtd->owner = THIS_MODULE;
217 208
218 if (mtd_has_partitions()) { 209 err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types,
219 210 &info->parts, 0);
220 err = parse_mtd_partitions(info->mtd, 211 if (err > 0) {
221 (const char **)part_probe_types, 212 mtd_device_register(info->mtd, info->parts, err);
222 &info->parts, 0); 213 return 0;
223 if (err > 0) { 214 }
224 add_mtd_partitions(info->mtd, info->parts, err); 215 if (latch_addr_data->nr_parts) {
225 return 0; 216 pr_notice("Using latch-addr-flash partition information\n");
226 } 217 mtd_device_register(info->mtd,
227 if (latch_addr_data->nr_parts) { 218 latch_addr_data->parts,
228 pr_notice("Using latch-addr-flash partition information\n"); 219 latch_addr_data->nr_parts);
229 add_mtd_partitions(info->mtd, latch_addr_data->parts, 220 return 0;
230 latch_addr_data->nr_parts);
231 return 0;
232 }
233 } 221 }
234 add_mtd_device(info->mtd); 222
223 mtd_device_register(info->mtd, NULL, 0);
235 return 0; 224 return 0;
236 225
237iounmap: 226iounmap:
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 0eb5a7c85380..93fa56c33003 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -69,8 +69,8 @@ static int __init init_mbx(void)
69 mymtd = do_map_probe("jedec_probe", &mbx_map); 69 mymtd = do_map_probe("jedec_probe", &mbx_map);
70 if (mymtd) { 70 if (mymtd) {
71 mymtd->owner = THIS_MODULE; 71 mymtd->owner = THIS_MODULE;
72 add_mtd_device(mymtd); 72 mtd_device_register(mymtd, NULL, 0);
73 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS); 73 mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
74 return 0; 74 return 0;
75 } 75 }
76 76
@@ -81,7 +81,7 @@ static int __init init_mbx(void)
81static void __exit cleanup_mbx(void) 81static void __exit cleanup_mbx(void)
82{ 82{
83 if (mymtd) { 83 if (mymtd) {
84 del_mtd_device(mymtd); 84 mtd_device_unregister(mymtd);
85 map_destroy(mymtd); 85 map_destroy(mymtd);
86 } 86 }
87 if (mbx_map.virt) { 87 if (mbx_map.virt) {
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index c0cb319b2b70..81dc2598bc0a 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -116,14 +116,14 @@ static int __init init_netsc520(void)
116 } 116 }
117 117
118 mymtd->owner = THIS_MODULE; 118 mymtd->owner = THIS_MODULE;
119 add_mtd_partitions( mymtd, partition_info, NUM_PARTITIONS ); 119 mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
120 return 0; 120 return 0;
121} 121}
122 122
123static void __exit cleanup_netsc520(void) 123static void __exit cleanup_netsc520(void)
124{ 124{
125 if (mymtd) { 125 if (mymtd) {
126 del_mtd_partitions(mymtd); 126 mtd_device_unregister(mymtd);
127 map_destroy(mymtd); 127 map_destroy(mymtd);
128 } 128 }
129 if (netsc520_map.virt) { 129 if (netsc520_map.virt) {
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index a97133eb9d70..eadcfffc4f9c 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -383,13 +383,13 @@ static int __init nettel_init(void)
383 /* No BIOS regions when AMD boot */ 383 /* No BIOS regions when AMD boot */
384 num_intel_partitions -= 2; 384 num_intel_partitions -= 2;
385 } 385 }
386 rc = add_mtd_partitions(intel_mtd, nettel_intel_partitions, 386 rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
387 num_intel_partitions); 387 num_intel_partitions);
388#endif 388#endif
389 389
390 if (amd_mtd) { 390 if (amd_mtd) {
391 rc = add_mtd_partitions(amd_mtd, nettel_amd_partitions, 391 rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
392 num_amd_partitions); 392 num_amd_partitions);
393 } 393 }
394 394
395#ifdef CONFIG_MTD_CFI_INTELEXT 395#ifdef CONFIG_MTD_CFI_INTELEXT
@@ -419,7 +419,7 @@ static void __exit nettel_cleanup(void)
419 unregister_reboot_notifier(&nettel_notifier_block); 419 unregister_reboot_notifier(&nettel_notifier_block);
420#endif 420#endif
421 if (amd_mtd) { 421 if (amd_mtd) {
422 del_mtd_partitions(amd_mtd); 422 mtd_device_unregister(amd_mtd);
423 map_destroy(amd_mtd); 423 map_destroy(amd_mtd);
424 } 424 }
425 if (nettel_mmcrp) { 425 if (nettel_mmcrp) {
@@ -432,7 +432,7 @@ static void __exit nettel_cleanup(void)
432 } 432 }
433#ifdef CONFIG_MTD_CFI_INTELEXT 433#ifdef CONFIG_MTD_CFI_INTELEXT
434 if (intel_mtd) { 434 if (intel_mtd) {
435 del_mtd_partitions(intel_mtd); 435 mtd_device_unregister(intel_mtd);
436 map_destroy(intel_mtd); 436 map_destroy(intel_mtd);
437 } 437 }
438 if (nettel_intel_map.virt) { 438 if (nettel_intel_map.virt) {
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index 23fe1786770f..807ac2a2e686 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -175,7 +175,7 @@ void cleanup_oct5066(void)
175 int i; 175 int i;
176 for (i=0; i<2; i++) { 176 for (i=0; i<2; i++) {
177 if (oct5066_mtd[i]) { 177 if (oct5066_mtd[i]) {
178 del_mtd_device(oct5066_mtd[i]); 178 mtd_device_unregister(oct5066_mtd[i]);
179 map_destroy(oct5066_mtd[i]); 179 map_destroy(oct5066_mtd[i]);
180 } 180 }
181 } 181 }
@@ -220,7 +220,7 @@ static int __init init_oct5066(void)
220 oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]); 220 oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
221 if (oct5066_mtd[i]) { 221 if (oct5066_mtd[i]) {
222 oct5066_mtd[i]->owner = THIS_MODULE; 222 oct5066_mtd[i]->owner = THIS_MODULE;
223 add_mtd_device(oct5066_mtd[i]); 223 mtd_device_register(oct5066_mtd[i], NULL, 0);
224 } 224 }
225 } 225 }
226 226
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 48f4cf5cb9d1..1d005a3e9b41 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -313,7 +313,7 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
313 goto release; 313 goto release;
314 314
315 mtd->owner = THIS_MODULE; 315 mtd->owner = THIS_MODULE;
316 add_mtd_device(mtd); 316 mtd_device_register(mtd, NULL, 0);
317 317
318 pci_set_drvdata(dev, mtd); 318 pci_set_drvdata(dev, mtd);
319 319
@@ -336,7 +336,7 @@ mtd_pci_remove(struct pci_dev *dev)
336 struct mtd_info *mtd = pci_get_drvdata(dev); 336 struct mtd_info *mtd = pci_get_drvdata(dev);
337 struct map_pci_info *map = mtd->priv; 337 struct map_pci_info *map = mtd->priv;
338 338
339 del_mtd_device(mtd); 339 mtd_device_unregister(mtd);
340 map_destroy(mtd); 340 map_destroy(mtd);
341 map->exit(dev, map); 341 map->exit(dev, map);
342 kfree(map); 342 kfree(map);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 33dc2829b01b..bbe168b65c26 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -630,7 +630,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
630 dev->pcmcia_map.copy_to = pcmcia_copy_to; 630 dev->pcmcia_map.copy_to = pcmcia_copy_to;
631 } 631 }
632 632
633 if(add_mtd_device(mtd)) { 633 if (mtd_device_register(mtd, NULL, 0)) {
634 map_destroy(mtd); 634 map_destroy(mtd);
635 dev->mtd_info = NULL; 635 dev->mtd_info = NULL;
636 dev_err(&dev->p_dev->dev, 636 dev_err(&dev->p_dev->dev,
@@ -669,7 +669,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
669 DEBUG(3, "link=0x%p", link); 669 DEBUG(3, "link=0x%p", link);
670 670
671 if(dev->mtd_info) { 671 if(dev->mtd_info) {
672 del_mtd_device(dev->mtd_info); 672 mtd_device_unregister(dev->mtd_info);
673 dev_info(&dev->p_dev->dev, "mtd%d: Removing\n", 673 dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
674 dev->mtd_info->index); 674 dev->mtd_info->index);
675 map_destroy(dev->mtd_info); 675 map_destroy(dev->mtd_info);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 1a9b94f0ee54..f64cee4a3bfb 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,10 +27,8 @@ struct physmap_flash_info {
27 struct mtd_info *mtd[MAX_RESOURCES]; 27 struct mtd_info *mtd[MAX_RESOURCES];
28 struct mtd_info *cmtd; 28 struct mtd_info *cmtd;
29 struct map_info map[MAX_RESOURCES]; 29 struct map_info map[MAX_RESOURCES];
30#ifdef CONFIG_MTD_PARTITIONS
31 int nr_parts; 30 int nr_parts;
32 struct mtd_partition *parts; 31 struct mtd_partition *parts;
33#endif
34}; 32};
35 33
36static int physmap_flash_remove(struct platform_device *dev) 34static int physmap_flash_remove(struct platform_device *dev)
@@ -47,18 +45,9 @@ static int physmap_flash_remove(struct platform_device *dev)
47 physmap_data = dev->dev.platform_data; 45 physmap_data = dev->dev.platform_data;
48 46
49 if (info->cmtd) { 47 if (info->cmtd) {
50#ifdef CONFIG_MTD_PARTITIONS 48 mtd_device_unregister(info->cmtd);
51 if (info->nr_parts || physmap_data->nr_parts) { 49 if (info->nr_parts)
52 del_mtd_partitions(info->cmtd); 50 kfree(info->parts);
53
54 if (info->nr_parts)
55 kfree(info->parts);
56 } else {
57 del_mtd_device(info->cmtd);
58 }
59#else
60 del_mtd_device(info->cmtd);
61#endif
62 if (info->cmtd != info->mtd[0]) 51 if (info->cmtd != info->mtd[0])
63 mtd_concat_destroy(info->cmtd); 52 mtd_concat_destroy(info->cmtd);
64 } 53 }
@@ -92,10 +81,8 @@ static const char *rom_probe_types[] = {
92 "qinfo_probe", 81 "qinfo_probe",
93 "map_rom", 82 "map_rom",
94 NULL }; 83 NULL };
95#ifdef CONFIG_MTD_PARTITIONS
96static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs", 84static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs",
97 NULL }; 85 NULL };
98#endif
99 86
100static int physmap_flash_probe(struct platform_device *dev) 87static int physmap_flash_probe(struct platform_device *dev)
101{ 88{
@@ -188,24 +175,23 @@ static int physmap_flash_probe(struct platform_device *dev)
188 if (err) 175 if (err)
189 goto err_out; 176 goto err_out;
190 177
191#ifdef CONFIG_MTD_PARTITIONS
192 err = parse_mtd_partitions(info->cmtd, part_probe_types, 178 err = parse_mtd_partitions(info->cmtd, part_probe_types,
193 &info->parts, 0); 179 &info->parts, 0);
194 if (err > 0) { 180 if (err > 0) {
195 add_mtd_partitions(info->cmtd, info->parts, err); 181 mtd_device_register(info->cmtd, info->parts, err);
196 info->nr_parts = err; 182 info->nr_parts = err;
197 return 0; 183 return 0;
198 } 184 }
199 185
200 if (physmap_data->nr_parts) { 186 if (physmap_data->nr_parts) {
201 printk(KERN_NOTICE "Using physmap partition information\n"); 187 printk(KERN_NOTICE "Using physmap partition information\n");
202 add_mtd_partitions(info->cmtd, physmap_data->parts, 188 mtd_device_register(info->cmtd, physmap_data->parts,
203 physmap_data->nr_parts); 189 physmap_data->nr_parts);
204 return 0; 190 return 0;
205 } 191 }
206#endif
207 192
208 add_mtd_device(info->cmtd); 193 mtd_device_register(info->cmtd, NULL, 0);
194
209 return 0; 195 return 0;
210 196
211err_out: 197err_out:
@@ -269,14 +255,12 @@ void physmap_configure(unsigned long addr, unsigned long size,
269 physmap_flash_data.set_vpp = set_vpp; 255 physmap_flash_data.set_vpp = set_vpp;
270} 256}
271 257
272#ifdef CONFIG_MTD_PARTITIONS
273void physmap_set_partitions(struct mtd_partition *parts, int num_parts) 258void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
274{ 259{
275 physmap_flash_data.nr_parts = num_parts; 260 physmap_flash_data.nr_parts = num_parts;
276 physmap_flash_data.parts = parts; 261 physmap_flash_data.parts = parts;
277} 262}
278#endif 263#endif
279#endif
280 264
281static int __init physmap_init(void) 265static int __init physmap_init(void)
282{ 266{
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c1d33464aee8..d251d1db129b 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -34,16 +34,12 @@ struct of_flash_list {
34 34
35struct of_flash { 35struct of_flash {
36 struct mtd_info *cmtd; 36 struct mtd_info *cmtd;
37#ifdef CONFIG_MTD_PARTITIONS
38 struct mtd_partition *parts; 37 struct mtd_partition *parts;
39#endif
40 int list_size; /* number of elements in of_flash_list */ 38 int list_size; /* number of elements in of_flash_list */
41 struct of_flash_list list[0]; 39 struct of_flash_list list[0];
42}; 40};
43 41
44#ifdef CONFIG_MTD_PARTITIONS
45#define OF_FLASH_PARTS(info) ((info)->parts) 42#define OF_FLASH_PARTS(info) ((info)->parts)
46
47static int parse_obsolete_partitions(struct platform_device *dev, 43static int parse_obsolete_partitions(struct platform_device *dev,
48 struct of_flash *info, 44 struct of_flash *info,
49 struct device_node *dp) 45 struct device_node *dp)
@@ -89,10 +85,6 @@ static int parse_obsolete_partitions(struct platform_device *dev,
89 85
90 return nr_parts; 86 return nr_parts;
91} 87}
92#else /* MTD_PARTITIONS */
93#define OF_FLASH_PARTS(info) (0)
94#define parse_partitions(info, dev) (0)
95#endif /* MTD_PARTITIONS */
96 88
97static int of_flash_remove(struct platform_device *dev) 89static int of_flash_remove(struct platform_device *dev)
98{ 90{
@@ -105,17 +97,14 @@ static int of_flash_remove(struct platform_device *dev)
105 dev_set_drvdata(&dev->dev, NULL); 97 dev_set_drvdata(&dev->dev, NULL);
106 98
107 if (info->cmtd != info->list[0].mtd) { 99 if (info->cmtd != info->list[0].mtd) {
108 del_mtd_device(info->cmtd); 100 mtd_device_unregister(info->cmtd);
109 mtd_concat_destroy(info->cmtd); 101 mtd_concat_destroy(info->cmtd);
110 } 102 }
111 103
112 if (info->cmtd) { 104 if (info->cmtd) {
113 if (OF_FLASH_PARTS(info)) { 105 if (OF_FLASH_PARTS(info))
114 del_mtd_partitions(info->cmtd);
115 kfree(OF_FLASH_PARTS(info)); 106 kfree(OF_FLASH_PARTS(info));
116 } else { 107 mtd_device_unregister(info->cmtd);
117 del_mtd_device(info->cmtd);
118 }
119 } 108 }
120 109
121 for (i = 0; i < info->list_size; i++) { 110 for (i = 0; i < info->list_size; i++) {
@@ -172,7 +161,6 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
172 } 161 }
173} 162}
174 163
175#ifdef CONFIG_MTD_PARTITIONS
176/* When partitions are set we look for a linux,part-probe property which 164/* When partitions are set we look for a linux,part-probe property which
177 specifies the list of partition probers to use. If none is given then the 165 specifies the list of partition probers to use. If none is given then the
178 default is use. These take precedence over other device tree 166 default is use. These take precedence over other device tree
@@ -212,14 +200,11 @@ static void __devinit of_free_probes(const char **probes)
212 if (probes != part_probe_types_def) 200 if (probes != part_probe_types_def)
213 kfree(probes); 201 kfree(probes);
214} 202}
215#endif
216 203
217static struct of_device_id of_flash_match[]; 204static struct of_device_id of_flash_match[];
218static int __devinit of_flash_probe(struct platform_device *dev) 205static int __devinit of_flash_probe(struct platform_device *dev)
219{ 206{
220#ifdef CONFIG_MTD_PARTITIONS
221 const char **part_probe_types; 207 const char **part_probe_types;
222#endif
223 const struct of_device_id *match; 208 const struct of_device_id *match;
224 struct device_node *dp = dev->dev.of_node; 209 struct device_node *dp = dev->dev.of_node;
225 struct resource res; 210 struct resource res;
@@ -346,7 +331,6 @@ static int __devinit of_flash_probe(struct platform_device *dev)
346 if (err) 331 if (err)
347 goto err_out; 332 goto err_out;
348 333
349#ifdef CONFIG_MTD_PARTITIONS
350 part_probe_types = of_get_probes(dp); 334 part_probe_types = of_get_probes(dp);
351 err = parse_mtd_partitions(info->cmtd, part_probe_types, 335 err = parse_mtd_partitions(info->cmtd, part_probe_types,
352 &info->parts, 0); 336 &info->parts, 0);
@@ -356,13 +340,11 @@ static int __devinit of_flash_probe(struct platform_device *dev)
356 } 340 }
357 of_free_probes(part_probe_types); 341 of_free_probes(part_probe_types);
358 342
359#ifdef CONFIG_MTD_OF_PARTS
360 if (err == 0) { 343 if (err == 0) {
361 err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts); 344 err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
362 if (err < 0) 345 if (err < 0)
363 goto err_out; 346 goto err_out;
364 } 347 }
365#endif
366 348
367 if (err == 0) { 349 if (err == 0) {
368 err = parse_obsolete_partitions(dev, info, dp); 350 err = parse_obsolete_partitions(dev, info, dp);
@@ -370,11 +352,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
370 goto err_out; 352 goto err_out;
371 } 353 }
372 354
373 if (err > 0) 355 mtd_device_register(info->cmtd, info->parts, err);
374 add_mtd_partitions(info->cmtd, info->parts, err);
375 else
376#endif
377 add_mtd_device(info->cmtd);
378 356
379 kfree(mtd_list); 357 kfree(mtd_list);
380 358
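
The physmap_of conversion above is representative of the pattern every map driver in this series moves to: parse for a partition table first, then make a single mtd_device_register() call that covers both the partitioned and the whole-device case. A minimal sketch of that pattern, assuming an already-probed struct mtd_info; the example_* names are illustrative and not part of the patch:

	#include <linux/slab.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>

	static const char *example_probe_types[] = { "cmdlinepart", NULL };

	static int example_flash_register(struct mtd_info *mtd,
					  struct mtd_partition **parts)
	{
		int nr_parts;

		/* Ask the registered parsers for a partition table first. */
		nr_parts = parse_mtd_partitions(mtd, example_probe_types, parts, 0);
		if (nr_parts < 0)
			nr_parts = 0;

		/*
		 * One call handles both cases: with a NULL table the whole
		 * device is registered, otherwise only the partitions are.
		 */
		return mtd_device_register(mtd, nr_parts ? *parts : NULL, nr_parts);
	}

	static void example_flash_unregister(struct mtd_info *mtd,
					     struct mtd_partition *parts)
	{
		/* Tears down partitions and/or the master, whichever was added. */
		mtd_device_unregister(mtd);
		kfree(parts);
	}

This is why the per-driver #ifdef CONFIG_MTD_PARTITIONS blocks can go away: the whole/partitioned decision now lives behind one registration call.
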
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 76a76be5a7bd..9ca1eccba4bc 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -94,14 +94,11 @@ static int platram_remove(struct platform_device *pdev)
94 return 0; 94 return 0;
95 95
96 if (info->mtd) { 96 if (info->mtd) {
97#ifdef CONFIG_MTD_PARTITIONS 97 mtd_device_unregister(info->mtd);
98 if (info->partitions) { 98 if (info->partitions) {
99 del_mtd_partitions(info->mtd);
100 if (info->free_partitions) 99 if (info->free_partitions)
101 kfree(info->partitions); 100 kfree(info->partitions);
102 } 101 }
103#endif
104 del_mtd_device(info->mtd);
105 map_destroy(info->mtd); 102 map_destroy(info->mtd);
106 } 103 }
107 104
@@ -231,7 +228,6 @@ static int platram_probe(struct platform_device *pdev)
231 /* check to see if there are any available partitions, or wether 228 /* check to see if there are any available partitions, or wether
232 * to add this device whole */ 229 * to add this device whole */
233 230
234#ifdef CONFIG_MTD_PARTITIONS
235 if (!pdata->nr_partitions) { 231 if (!pdata->nr_partitions) {
236 /* try to probe using the supplied probe type */ 232 /* try to probe using the supplied probe type */
237 if (pdata->probes) { 233 if (pdata->probes) {
@@ -239,24 +235,22 @@ static int platram_probe(struct platform_device *pdev)
239 &info->partitions, 0); 235 &info->partitions, 0);
240 info->free_partitions = 1; 236 info->free_partitions = 1;
241 if (err > 0) 237 if (err > 0)
242 err = add_mtd_partitions(info->mtd, 238 err = mtd_device_register(info->mtd,
243 info->partitions, err); 239 info->partitions, err);
244 } 240 }
245 } 241 }
246 /* use the static mapping */ 242 /* use the static mapping */
247 else 243 else
248 err = add_mtd_partitions(info->mtd, pdata->partitions, 244 err = mtd_device_register(info->mtd, pdata->partitions,
249 pdata->nr_partitions); 245 pdata->nr_partitions);
250#endif /* CONFIG_MTD_PARTITIONS */
251
252 if (add_mtd_device(info->mtd)) {
253 dev_err(&pdev->dev, "add_mtd_device() failed\n");
254 err = -ENOMEM;
255 }
256
257 if (!err) 246 if (!err)
258 dev_info(&pdev->dev, "registered mtd device\n"); 247 dev_info(&pdev->dev, "registered mtd device\n");
259 248
249 /* add the whole device. */
250 err = mtd_device_register(info->mtd, NULL, 0);
251 if (err)
252 dev_err(&pdev->dev, "failed to register the entire device\n");
253
260 return err; 254 return err;
261 255
262 exit_free: 256 exit_free:
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index 64aea6acd48e..744ca5cacc9b 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -173,7 +173,7 @@ static int __init init_msp_flash(void)
173 msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]); 173 msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
174 if (msp_flash[i]) { 174 if (msp_flash[i]) {
175 msp_flash[i]->owner = THIS_MODULE; 175 msp_flash[i]->owner = THIS_MODULE;
176 add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt); 176 mtd_device_register(msp_flash[i], msp_parts[i], pcnt);
177 } else { 177 } else {
178 printk(KERN_ERR "map probe failed for flash\n"); 178 printk(KERN_ERR "map probe failed for flash\n");
179 ret = -ENXIO; 179 ret = -ENXIO;
@@ -188,7 +188,7 @@ static int __init init_msp_flash(void)
188 188
189cleanup_loop: 189cleanup_loop:
190 while (i--) { 190 while (i--) {
191 del_mtd_partitions(msp_flash[i]); 191 mtd_device_unregister(msp_flash[i]);
192 map_destroy(msp_flash[i]); 192 map_destroy(msp_flash[i]);
193 kfree(msp_maps[i].name); 193 kfree(msp_maps[i].name);
194 iounmap(msp_maps[i].virt); 194 iounmap(msp_maps[i].virt);
@@ -207,7 +207,7 @@ static void __exit cleanup_msp_flash(void)
207 int i; 207 int i;
208 208
209 for (i = 0; i < fcnt; i++) { 209 for (i = 0; i < fcnt; i++) {
210 del_mtd_partitions(msp_flash[i]); 210 mtd_device_unregister(msp_flash[i]);
211 map_destroy(msp_flash[i]); 211 map_destroy(msp_flash[i]);
212 iounmap((void *)msp_maps[i].virt); 212 iounmap((void *)msp_maps[i].virt);
213 213
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d8ae634d347e..f59d62f74d44 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -104,23 +104,18 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
104 } 104 }
105 info->mtd->owner = THIS_MODULE; 105 info->mtd->owner = THIS_MODULE;
106 106
107#ifdef CONFIG_MTD_PARTITIONS
108 ret = parse_mtd_partitions(info->mtd, probes, &parts, 0); 107 ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
109 108
110 if (ret > 0) { 109 if (ret > 0) {
111 info->nr_parts = ret; 110 info->nr_parts = ret;
112 info->parts = parts; 111 info->parts = parts;
113 } 112 }
114#endif
115 113
116 if (info->nr_parts) { 114 if (!info->nr_parts)
117 add_mtd_partitions(info->mtd, info->parts,
118 info->nr_parts);
119 } else {
120 printk("Registering %s as whole device\n", 115 printk("Registering %s as whole device\n",
121 info->map.name); 116 info->map.name);
122 add_mtd_device(info->mtd); 117
123 } 118 mtd_device_register(info->mtd, info->parts, info->nr_parts);
124 119
125 platform_set_drvdata(pdev, info); 120 platform_set_drvdata(pdev, info);
126 return 0; 121 return 0;
@@ -132,12 +127,7 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
132 127
133 platform_set_drvdata(dev, NULL); 128 platform_set_drvdata(dev, NULL);
134 129
135#ifdef CONFIG_MTD_PARTITIONS 130 mtd_device_unregister(info->mtd);
136 if (info->nr_parts)
137 del_mtd_partitions(info->mtd);
138 else
139#endif
140 del_mtd_device(info->mtd);
141 131
142 map_destroy(info->mtd); 132 map_destroy(info->mtd);
143 iounmap(info->map.virt); 133 iounmap(info->map.virt);
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 83ed64512c5e..761fb459d2c7 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -25,10 +25,8 @@
25struct rbtx4939_flash_info { 25struct rbtx4939_flash_info {
26 struct mtd_info *mtd; 26 struct mtd_info *mtd;
27 struct map_info map; 27 struct map_info map;
28#ifdef CONFIG_MTD_PARTITIONS
29 int nr_parts; 28 int nr_parts;
30 struct mtd_partition *parts; 29 struct mtd_partition *parts;
31#endif
32}; 30};
33 31
34static int rbtx4939_flash_remove(struct platform_device *dev) 32static int rbtx4939_flash_remove(struct platform_device *dev)
@@ -41,28 +39,18 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
41 platform_set_drvdata(dev, NULL); 39 platform_set_drvdata(dev, NULL);
42 40
43 if (info->mtd) { 41 if (info->mtd) {
44#ifdef CONFIG_MTD_PARTITIONS
45 struct rbtx4939_flash_data *pdata = dev->dev.platform_data; 42 struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
46 43
47 if (info->nr_parts) { 44 if (info->nr_parts)
48 del_mtd_partitions(info->mtd);
49 kfree(info->parts); 45 kfree(info->parts);
50 } else if (pdata->nr_parts) 46 mtd_device_unregister(info->mtd);
51 del_mtd_partitions(info->mtd);
52 else
53 del_mtd_device(info->mtd);
54#else
55 del_mtd_device(info->mtd);
56#endif
57 map_destroy(info->mtd); 47 map_destroy(info->mtd);
58 } 48 }
59 return 0; 49 return 0;
60} 50}
61 51
62static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 52static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
63#ifdef CONFIG_MTD_PARTITIONS
64static const char *part_probe_types[] = { "cmdlinepart", NULL }; 53static const char *part_probe_types[] = { "cmdlinepart", NULL };
65#endif
66 54
67static int rbtx4939_flash_probe(struct platform_device *dev) 55static int rbtx4939_flash_probe(struct platform_device *dev)
68{ 56{
@@ -120,23 +108,21 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
120 if (err) 108 if (err)
121 goto err_out; 109 goto err_out;
122 110
123#ifdef CONFIG_MTD_PARTITIONS
124 err = parse_mtd_partitions(info->mtd, part_probe_types, 111 err = parse_mtd_partitions(info->mtd, part_probe_types,
125 &info->parts, 0); 112 &info->parts, 0);
126 if (err > 0) { 113 if (err > 0) {
127 add_mtd_partitions(info->mtd, info->parts, err); 114 mtd_device_register(info->mtd, info->parts, err);
128 info->nr_parts = err; 115 info->nr_parts = err;
129 return 0; 116 return 0;
130 } 117 }
131 118
132 if (pdata->nr_parts) { 119 if (pdata->nr_parts) {
133 pr_notice("Using rbtx4939 partition information\n"); 120 pr_notice("Using rbtx4939 partition information\n");
134 add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts); 121 mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
135 return 0; 122 return 0;
136 } 123 }
137#endif
138 124
139 add_mtd_device(info->mtd); 125 mtd_device_register(info->mtd, NULL, 0);
140 return 0; 126 return 0;
141 127
142err_out: 128err_out:
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 3e3ef53d4fd4..ed88225bf667 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -36,7 +36,7 @@ static int __init init_rpxlite(void)
36 mymtd = do_map_probe("cfi_probe", &rpxlite_map); 36 mymtd = do_map_probe("cfi_probe", &rpxlite_map);
37 if (mymtd) { 37 if (mymtd) {
38 mymtd->owner = THIS_MODULE; 38 mymtd->owner = THIS_MODULE;
39 add_mtd_device(mymtd); 39 mtd_device_register(mymtd, NULL, 0);
40 return 0; 40 return 0;
41 } 41 }
42 42
@@ -47,7 +47,7 @@ static int __init init_rpxlite(void)
47static void __exit cleanup_rpxlite(void) 47static void __exit cleanup_rpxlite(void)
48{ 48{
49 if (mymtd) { 49 if (mymtd) {
50 del_mtd_device(mymtd); 50 mtd_device_unregister(mymtd);
51 map_destroy(mymtd); 51 map_destroy(mymtd);
52 } 52 }
53 if (rpxlite_map.virt) { 53 if (rpxlite_map.virt) {
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index da875908ea8e..a9b5e0e5c4c5 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -226,12 +226,7 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
226 int i; 226 int i;
227 227
228 if (info->mtd) { 228 if (info->mtd) {
229 if (info->nr_parts == 0) 229 mtd_device_unregister(info->mtd);
230 del_mtd_device(info->mtd);
231#ifdef CONFIG_MTD_PARTITIONS
232 else
233 del_mtd_partitions(info->mtd);
234#endif
235 if (info->mtd != info->subdev[0].mtd) 230 if (info->mtd != info->subdev[0].mtd)
236 mtd_concat_destroy(info->mtd); 231 mtd_concat_destroy(info->mtd);
237 } 232 }
@@ -363,28 +358,24 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
363 /* 358 /*
364 * Partition selection stuff. 359 * Partition selection stuff.
365 */ 360 */
366#ifdef CONFIG_MTD_PARTITIONS
367 nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0); 361 nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
368 if (nr_parts > 0) { 362 if (nr_parts > 0) {
369 info->parts = parts; 363 info->parts = parts;
370 part_type = "dynamic"; 364 part_type = "dynamic";
371 } else 365 } else {
372#endif
373 {
374 parts = plat->parts; 366 parts = plat->parts;
375 nr_parts = plat->nr_parts; 367 nr_parts = plat->nr_parts;
376 part_type = "static"; 368 part_type = "static";
377 } 369 }
378 370
379 if (nr_parts == 0) { 371 if (nr_parts == 0)
380 printk(KERN_NOTICE "SA1100 flash: no partition info " 372 printk(KERN_NOTICE "SA1100 flash: no partition info "
381 "available, registering whole flash\n"); 373 "available, registering whole flash\n");
382 add_mtd_device(info->mtd); 374 else
383 } else {
384 printk(KERN_NOTICE "SA1100 flash: using %s partition " 375 printk(KERN_NOTICE "SA1100 flash: using %s partition "
385 "definition\n", part_type); 376 "definition\n", part_type);
386 add_mtd_partitions(info->mtd, parts, nr_parts); 377
387 } 378 mtd_device_register(info->mtd, parts, nr_parts);
388 379
389 info->nr_parts = nr_parts; 380 info->nr_parts = nr_parts;
390 381
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 04b2781fc627..556a2dfe94c5 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -182,7 +182,7 @@ static struct mtd_info *all_mtd;
182static void cleanup_sbc_gxx(void) 182static void cleanup_sbc_gxx(void)
183{ 183{
184 if( all_mtd ) { 184 if( all_mtd ) {
185 del_mtd_partitions( all_mtd ); 185 mtd_device_unregister(all_mtd);
186 map_destroy( all_mtd ); 186 map_destroy( all_mtd );
187 } 187 }
188 188
@@ -223,7 +223,7 @@ static int __init init_sbc_gxx(void)
223 all_mtd->owner = THIS_MODULE; 223 all_mtd->owner = THIS_MODULE;
224 224
225 /* Create MTD devices for each partition. */ 225 /* Create MTD devices for each partition. */
226 add_mtd_partitions(all_mtd, partition_info, NUM_PARTITIONS ); 226 mtd_device_register(all_mtd, partition_info, NUM_PARTITIONS);
227 227
228 return 0; 228 return 0;
229} 229}
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 4d8aaaf4bb76..8fead8e46bce 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -266,10 +266,10 @@ static int __init init_sc520cdp(void)
266 /* Combine the two flash banks into a single MTD device & register it: */ 266 /* Combine the two flash banks into a single MTD device & register it: */
267 merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1"); 267 merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1");
268 if(merged_mtd) 268 if(merged_mtd)
269 add_mtd_device(merged_mtd); 269 mtd_device_register(merged_mtd, NULL, 0);
270 } 270 }
271 if(devices_found == 3) /* register the third (DIL-Flash) device */ 271 if(devices_found == 3) /* register the third (DIL-Flash) device */
272 add_mtd_device(mymtd[2]); 272 mtd_device_register(mymtd[2], NULL, 0);
273 return(devices_found ? 0 : -ENXIO); 273 return(devices_found ? 0 : -ENXIO);
274} 274}
275 275
@@ -278,11 +278,11 @@ static void __exit cleanup_sc520cdp(void)
278 int i; 278 int i;
279 279
280 if (merged_mtd) { 280 if (merged_mtd) {
281 del_mtd_device(merged_mtd); 281 mtd_device_unregister(merged_mtd);
282 mtd_concat_destroy(merged_mtd); 282 mtd_concat_destroy(merged_mtd);
283 } 283 }
284 if (mymtd[2]) 284 if (mymtd[2])
285 del_mtd_device(mymtd[2]); 285 mtd_device_unregister(mymtd[2]);
286 286
287 for (i = 0; i < NUM_FLASH_BANKS; i++) { 287 for (i = 0; i < NUM_FLASH_BANKS; i++) {
288 if (mymtd[i]) 288 if (mymtd[i])
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 7e329f09a548..d88c8426bb0f 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -180,7 +180,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
180 180
181 scb2_mtd->owner = THIS_MODULE; 181 scb2_mtd->owner = THIS_MODULE;
182 if (scb2_fixup_mtd(scb2_mtd) < 0) { 182 if (scb2_fixup_mtd(scb2_mtd) < 0) {
183 del_mtd_device(scb2_mtd); 183 mtd_device_unregister(scb2_mtd);
184 map_destroy(scb2_mtd); 184 map_destroy(scb2_mtd);
185 iounmap(scb2_ioaddr); 185 iounmap(scb2_ioaddr);
186 if (!region_fail) 186 if (!region_fail)
@@ -192,7 +192,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
192 (unsigned long long)scb2_mtd->size, 192 (unsigned long long)scb2_mtd->size,
193 (unsigned long long)(SCB2_WINDOW - scb2_mtd->size)); 193 (unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
194 194
195 add_mtd_device(scb2_mtd); 195 mtd_device_register(scb2_mtd, NULL, 0);
196 196
197 return 0; 197 return 0;
198} 198}
@@ -207,7 +207,7 @@ scb2_flash_remove(struct pci_dev *dev)
207 if (scb2_mtd->lock) 207 if (scb2_mtd->lock)
208 scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size); 208 scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
209 209
210 del_mtd_device(scb2_mtd); 210 mtd_device_unregister(scb2_mtd);
211 map_destroy(scb2_mtd); 211 map_destroy(scb2_mtd);
212 212
213 iounmap(scb2_ioaddr); 213 iounmap(scb2_ioaddr);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 027e628a4f1d..f1c1f737d0d7 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -44,7 +44,6 @@ static struct resource docmem = {
44 44
45static struct mtd_info *mymtd; 45static struct mtd_info *mymtd;
46 46
47#ifdef CONFIG_MTD_PARTITIONS
48static struct mtd_partition partition_info[] = { 47static struct mtd_partition partition_info[] = {
49 { 48 {
50 .name = "DOCCS Boot kernel", 49 .name = "DOCCS Boot kernel",
@@ -68,8 +67,6 @@ static struct mtd_partition partition_info[] = {
68 }, 67 },
69}; 68};
70#define NUM_PARTITIONS ARRAY_SIZE(partition_info) 69#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
71#endif
72
73 70
74static struct map_info scx200_docflash_map = { 71static struct map_info scx200_docflash_map = {
75 .name = "NatSemi SCx200 DOCCS Flash", 72 .name = "NatSemi SCx200 DOCCS Flash",
@@ -198,24 +195,17 @@ static int __init init_scx200_docflash(void)
198 195
199 mymtd->owner = THIS_MODULE; 196 mymtd->owner = THIS_MODULE;
200 197
201#ifdef CONFIG_MTD_PARTITIONS
202 partition_info[3].offset = mymtd->size-partition_info[3].size; 198 partition_info[3].offset = mymtd->size-partition_info[3].size;
203 partition_info[2].size = partition_info[3].offset-partition_info[2].offset; 199 partition_info[2].size = partition_info[3].offset-partition_info[2].offset;
204 add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS); 200 mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
205#else 201
206 add_mtd_device(mymtd);
207#endif
208 return 0; 202 return 0;
209} 203}
210 204
211static void __exit cleanup_scx200_docflash(void) 205static void __exit cleanup_scx200_docflash(void)
212{ 206{
213 if (mymtd) { 207 if (mymtd) {
214#ifdef CONFIG_MTD_PARTITIONS 208 mtd_device_unregister(mymtd);
215 del_mtd_partitions(mymtd);
216#else
217 del_mtd_device(mymtd);
218#endif
219 map_destroy(mymtd); 209 map_destroy(mymtd);
220 } 210 }
221 if (scx200_docflash_map.virt) { 211 if (scx200_docflash_map.virt) {
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 0eb41d9c6786..cbf6bade9354 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -89,7 +89,7 @@ static int __init init_soleng_maps(void)
89 eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map); 89 eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
90 if (eprom_mtd) { 90 if (eprom_mtd) {
91 eprom_mtd->owner = THIS_MODULE; 91 eprom_mtd->owner = THIS_MODULE;
92 add_mtd_device(eprom_mtd); 92 mtd_device_register(eprom_mtd, NULL, 0);
93 } 93 }
94 94
95 nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0); 95 nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
@@ -104,9 +104,9 @@ static int __init init_soleng_maps(void)
104#endif /* CONFIG_MTD_SUPERH_RESERVE */ 104#endif /* CONFIG_MTD_SUPERH_RESERVE */
105 105
106 if (nr_parts > 0) 106 if (nr_parts > 0)
107 add_mtd_partitions(flash_mtd, parsed_parts, nr_parts); 107 mtd_device_register(flash_mtd, parsed_parts, nr_parts);
108 else 108 else
109 add_mtd_device(flash_mtd); 109 mtd_device_register(flash_mtd, NULL, 0);
110 110
111 return 0; 111 return 0;
112} 112}
@@ -114,14 +114,14 @@ static int __init init_soleng_maps(void)
114static void __exit cleanup_soleng_maps(void) 114static void __exit cleanup_soleng_maps(void)
115{ 115{
116 if (eprom_mtd) { 116 if (eprom_mtd) {
117 del_mtd_device(eprom_mtd); 117 mtd_device_unregister(eprom_mtd);
118 map_destroy(eprom_mtd); 118 map_destroy(eprom_mtd);
119 } 119 }
120 120
121 if (parsed_parts) 121 if (parsed_parts)
122 del_mtd_partitions(flash_mtd); 122 mtd_device_unregister(flash_mtd);
123 else 123 else
124 del_mtd_device(flash_mtd); 124 mtd_device_unregister(flash_mtd);
125 map_destroy(flash_mtd); 125 map_destroy(flash_mtd);
126} 126}
127 127
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 3f1cb328a574..2d66234f57cb 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -101,7 +101,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
101 101
102 up->mtd->owner = THIS_MODULE; 102 up->mtd->owner = THIS_MODULE;
103 103
104 add_mtd_device(up->mtd); 104 mtd_device_register(up->mtd, NULL, 0);
105 105
106 dev_set_drvdata(&op->dev, up); 106 dev_set_drvdata(&op->dev, up);
107 107
@@ -126,7 +126,7 @@ static int __devexit uflash_remove(struct platform_device *op)
126 struct uflash_dev *up = dev_get_drvdata(&op->dev); 126 struct uflash_dev *up = dev_get_drvdata(&op->dev);
127 127
128 if (up->mtd) { 128 if (up->mtd) {
129 del_mtd_device(up->mtd); 129 mtd_device_unregister(up->mtd);
130 map_destroy(up->mtd); 130 map_destroy(up->mtd);
131 } 131 }
132 if (up->map.virt) { 132 if (up->map.virt) {
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 0718dfb3ee64..d78587990e7e 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -62,7 +62,6 @@ static void __iomem *start_scan_addr;
62 * "struct map_desc *_io_desc" for the corresponding machine. 62 * "struct map_desc *_io_desc" for the corresponding machine.
63 */ 63 */
64 64
65#ifdef CONFIG_MTD_PARTITIONS
66/* Currently, TQM8xxL has up to 8MiB flash */ 65/* Currently, TQM8xxL has up to 8MiB flash */
67static unsigned long tqm8xxl_max_flash_size = 0x00800000; 66static unsigned long tqm8xxl_max_flash_size = 0x00800000;
68 67
@@ -107,7 +106,6 @@ static struct mtd_partition tqm8xxl_fs_partitions[] = {
107 //.size = MTDPART_SIZ_FULL, 106 //.size = MTDPART_SIZ_FULL,
108 } 107 }
109}; 108};
110#endif
111 109
112static int __init init_tqm_mtd(void) 110static int __init init_tqm_mtd(void)
113{ 111{
@@ -188,7 +186,6 @@ static int __init init_tqm_mtd(void)
188 goto error_mem; 186 goto error_mem;
189 } 187 }
190 188
191#ifdef CONFIG_MTD_PARTITIONS
192 /* 189 /*
193 * Select Static partition definitions 190 * Select Static partition definitions
194 */ 191 */
@@ -201,21 +198,14 @@ static int __init init_tqm_mtd(void)
201 part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions); 198 part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
202 199
203 for(idx = 0; idx < num_banks ; idx++) { 200 for(idx = 0; idx < num_banks ; idx++) {
204 if (part_banks[idx].nums == 0) { 201 if (part_banks[idx].nums == 0)
205 printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx); 202 printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
206 add_mtd_device(mtd_banks[idx]); 203 else
207 } else {
208 printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n", 204 printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
209 idx, part_banks[idx].type); 205 idx, part_banks[idx].type);
210 add_mtd_partitions(mtd_banks[idx], part_banks[idx].mtd_part, 206 mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part,
211 part_banks[idx].nums); 207 part_banks[idx].nums);
212 }
213 } 208 }
214#else
215 printk(KERN_NOTICE "TQM flash: registering %d whole flash banks at once\n", num_banks);
216 for(idx = 0 ; idx < num_banks ; idx++)
217 add_mtd_device(mtd_banks[idx]);
218#endif
219 return 0; 209 return 0;
220error_mem: 210error_mem:
221 for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) { 211 for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
@@ -237,7 +227,7 @@ static void __exit cleanup_tqm_mtd(void)
237 for(idx = 0 ; idx < num_banks ; idx++) { 227 for(idx = 0 ; idx < num_banks ; idx++) {
238 /* destroy mtd_info previously allocated */ 228 /* destroy mtd_info previously allocated */
239 if (mtd_banks[idx]) { 229 if (mtd_banks[idx]) {
240 del_mtd_partitions(mtd_banks[idx]); 230 mtd_device_unregister(mtd_banks[idx]);
241 map_destroy(mtd_banks[idx]); 231 map_destroy(mtd_banks[idx]);
242 } 232 }
243 /* release map_info not used anymore */ 233 /* release map_info not used anymore */
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index e02dfa9d4ddd..d1d671daf235 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -89,7 +89,7 @@ static int __init init_ts5500_map(void)
89 } 89 }
90 90
91 mymtd->owner = THIS_MODULE; 91 mymtd->owner = THIS_MODULE;
92 add_mtd_partitions(mymtd, ts5500_partitions, NUM_PARTITIONS); 92 mtd_device_register(mymtd, ts5500_partitions, NUM_PARTITIONS);
93 93
94 return 0; 94 return 0;
95 95
@@ -102,7 +102,7 @@ err2:
102static void __exit cleanup_ts5500_map(void) 102static void __exit cleanup_ts5500_map(void)
103{ 103{
104 if (mymtd) { 104 if (mymtd) {
105 del_mtd_partitions(mymtd); 105 mtd_device_unregister(mymtd);
106 map_destroy(mymtd); 106 map_destroy(mymtd);
107 } 107 }
108 108
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index 77a8bfc02577..1de390e1c2fb 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -76,7 +76,7 @@ static void __exit cleanup_tsunami_flash(void)
76 struct mtd_info *mtd; 76 struct mtd_info *mtd;
77 mtd = tsunami_flash_mtd; 77 mtd = tsunami_flash_mtd;
78 if (mtd) { 78 if (mtd) {
79 del_mtd_device(mtd); 79 mtd_device_unregister(mtd);
80 map_destroy(mtd); 80 map_destroy(mtd);
81 } 81 }
82 tsunami_flash_mtd = 0; 82 tsunami_flash_mtd = 0;
@@ -97,7 +97,7 @@ static int __init init_tsunami_flash(void)
97 } 97 }
98 if (tsunami_flash_mtd) { 98 if (tsunami_flash_mtd) {
99 tsunami_flash_mtd->owner = THIS_MODULE; 99 tsunami_flash_mtd->owner = THIS_MODULE;
100 add_mtd_device(tsunami_flash_mtd); 100 mtd_device_register(tsunami_flash_mtd, NULL, 0);
101 return 0; 101 return 0;
102 } 102 }
103 return -ENXIO; 103 return -ENXIO;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 35009294b435..6793074f3f40 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -89,11 +89,7 @@ static int __init uclinux_mtd_init(void)
89 mtd->priv = mapp; 89 mtd->priv = mapp;
90 90
91 uclinux_ram_mtdinfo = mtd; 91 uclinux_ram_mtdinfo = mtd;
92#ifdef CONFIG_MTD_PARTITIONS 92 mtd_device_register(mtd, uclinux_romfs, NUM_PARTITIONS);
93 add_mtd_partitions(mtd, uclinux_romfs, NUM_PARTITIONS);
94#else
95 add_mtd_device(mtd);
96#endif
97 93
98 return(0); 94 return(0);
99} 95}
@@ -103,11 +99,7 @@ static int __init uclinux_mtd_init(void)
103static void __exit uclinux_mtd_cleanup(void) 99static void __exit uclinux_mtd_cleanup(void)
104{ 100{
105 if (uclinux_ram_mtdinfo) { 101 if (uclinux_ram_mtdinfo) {
106#ifdef CONFIG_MTD_PARTITIONS 102 mtd_device_unregister(uclinux_ram_mtdinfo);
107 del_mtd_partitions(uclinux_ram_mtdinfo);
108#else
109 del_mtd_device(uclinux_ram_mtdinfo);
110#endif
111 map_destroy(uclinux_ram_mtdinfo); 103 map_destroy(uclinux_ram_mtdinfo);
112 uclinux_ram_mtdinfo = NULL; 104 uclinux_ram_mtdinfo = NULL;
113 } 105 }
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index 6adaa6acc193..5e68de73eabc 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -138,7 +138,7 @@ static void __exit cleanup_vmax301(void)
138 138
139 for (i=0; i<2; i++) { 139 for (i=0; i<2; i++) {
140 if (vmax_mtd[i]) { 140 if (vmax_mtd[i]) {
141 del_mtd_device(vmax_mtd[i]); 141 mtd_device_unregister(vmax_mtd[i]);
142 map_destroy(vmax_mtd[i]); 142 map_destroy(vmax_mtd[i]);
143 } 143 }
144 } 144 }
@@ -176,7 +176,7 @@ static int __init init_vmax301(void)
176 vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]); 176 vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
177 if (vmax_mtd[i]) { 177 if (vmax_mtd[i]) {
178 vmax_mtd[i]->owner = THIS_MODULE; 178 vmax_mtd[i]->owner = THIS_MODULE;
179 add_mtd_device(vmax_mtd[i]); 179 mtd_device_register(vmax_mtd[i], NULL, 0);
180 } 180 }
181 } 181 }
182 182
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 4afc167731ef..3a04b078576a 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -563,7 +563,7 @@ static void vmu_queryblocks(struct mapleq *mq)
563 goto fail_cache_create; 563 goto fail_cache_create;
564 part_cur->pcache = pcache; 564 part_cur->pcache = pcache;
565 565
566 error = add_mtd_device(mtd_cur); 566 error = mtd_device_register(mtd_cur, NULL, 0);
567 if (error) 567 if (error)
568 goto fail_mtd_register; 568 goto fail_mtd_register;
569 569
@@ -709,7 +709,7 @@ static void __devexit vmu_disconnect(struct maple_device *mdev)
709 for (x = 0; x < card->partitions; x++) { 709 for (x = 0; x < card->partitions; x++) {
710 mpart = ((card->mtd)[x]).priv; 710 mpart = ((card->mtd)[x]).priv;
711 mpart->mdev = NULL; 711 mpart->mdev = NULL;
712 del_mtd_device(&((card->mtd)[x])); 712 mtd_device_unregister(&((card->mtd)[x]));
713 kfree(((card->parts)[x]).name); 713 kfree(((card->parts)[x]).name);
714 } 714 }
715 kfree(card->parts); 715 kfree(card->parts);
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 933a2b6598b4..901ce968efae 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -132,17 +132,20 @@ static int __init init_sbc82xx_flash(void)
132 nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes, 132 nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
133 &sbcmtd_parts[i], 0); 133 &sbcmtd_parts[i], 0);
134 if (nr_parts > 0) { 134 if (nr_parts > 0) {
135 add_mtd_partitions (sbcmtd[i], sbcmtd_parts[i], nr_parts); 135 mtd_device_register(sbcmtd[i], sbcmtd_parts[i],
136 nr_parts);
136 continue; 137 continue;
137 } 138 }
138 139
139 /* No partitioning detected. Use default */ 140 /* No partitioning detected. Use default */
140 if (i == 2) { 141 if (i == 2) {
141 add_mtd_device(sbcmtd[i]); 142 mtd_device_register(sbcmtd[i], NULL, 0);
142 } else if (i == bigflash) { 143 } else if (i == bigflash) {
143 add_mtd_partitions (sbcmtd[i], bigflash_parts, ARRAY_SIZE(bigflash_parts)); 144 mtd_device_register(sbcmtd[i], bigflash_parts,
145 ARRAY_SIZE(bigflash_parts));
144 } else { 146 } else {
145 add_mtd_partitions (sbcmtd[i], smallflash_parts, ARRAY_SIZE(smallflash_parts)); 147 mtd_device_register(sbcmtd[i], smallflash_parts,
148 ARRAY_SIZE(smallflash_parts));
146 } 149 }
147 } 150 }
148 return 0; 151 return 0;
@@ -157,9 +160,9 @@ static void __exit cleanup_sbc82xx_flash(void)
157 continue; 160 continue;
158 161
159 if (i<2 || sbcmtd_parts[i]) 162 if (i<2 || sbcmtd_parts[i])
160 del_mtd_partitions(sbcmtd[i]); 163 mtd_device_unregister(sbcmtd[i]);
161 else 164 else
162 del_mtd_device(sbcmtd[i]); 165 mtd_device_unregister(sbcmtd[i]);
163 166
164 kfree(sbcmtd_parts[i]); 167 kfree(sbcmtd_parts[i]);
165 map_destroy(sbcmtd[i]); 168 map_destroy(sbcmtd[i]);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a534e1f0c348..ca385697446e 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -221,15 +221,33 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
221 kref_get(&dev->ref); 221 kref_get(&dev->ref);
222 __module_get(dev->tr->owner); 222 __module_get(dev->tr->owner);
223 223
224 if (dev->mtd) { 224 if (!dev->mtd)
225 ret = dev->tr->open ? dev->tr->open(dev) : 0; 225 goto unlock;
226 __get_mtd_device(dev->mtd); 226
227 if (dev->tr->open) {
228 ret = dev->tr->open(dev);
229 if (ret)
230 goto error_put;
227 } 231 }
228 232
233 ret = __get_mtd_device(dev->mtd);
234 if (ret)
235 goto error_release;
236
229unlock: 237unlock:
230 mutex_unlock(&dev->lock); 238 mutex_unlock(&dev->lock);
231 blktrans_dev_put(dev); 239 blktrans_dev_put(dev);
232 return ret; 240 return ret;
241
242error_release:
243 if (dev->tr->release)
244 dev->tr->release(dev);
245error_put:
246 module_put(dev->tr->owner);
247 kref_put(&dev->ref, blktrans_dev_release);
248 mutex_unlock(&dev->lock);
249 blktrans_dev_put(dev);
250 return ret;
233} 251}
234 252
235static int blktrans_release(struct gendisk *disk, fmode_t mode) 253static int blktrans_release(struct gendisk *disk, fmode_t mode)
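
The blktrans_open() rework above replaces the old "open and hope" sequence with ordered acquisition and reverse-order unwinding on failure, so a failing tr->open() or __get_mtd_device() no longer leaves the kref and module reference counts elevated. The general shape, reduced to a hypothetical device (none of these example_* helpers exist in the tree):

	#include <linux/errno.h>
	#include <linux/mutex.h>

	struct example_dev {			/* hypothetical, for illustration */
		struct mutex lock;
		void *backing;
	};

	static int example_backend_open(struct example_dev *dev) { return 0; }
	static void example_backend_release(struct example_dev *dev) { }
	static int example_take_reference(struct example_dev *dev) { return 0; }

	static int example_open(struct example_dev *dev)
	{
		int ret = 0;

		mutex_lock(&dev->lock);

		if (!dev->backing)			/* nothing attached yet */
			goto unlock;

		ret = example_backend_open(dev);	/* step 1 */
		if (ret)
			goto unlock;

		ret = example_take_reference(dev);	/* step 2 */
		if (ret)
			goto err_release;

	unlock:
		mutex_unlock(&dev->lock);
		return ret;

	err_release:
		example_backend_release(dev);		/* undo step 1 */
		mutex_unlock(&dev->lock);
		return ret;
	}
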
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4c36ef66a46b..3f92731a5b9e 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -166,10 +166,23 @@ static int mtd_close(struct inode *inode, struct file *file)
166 return 0; 166 return 0;
167} /* mtd_close */ 167} /* mtd_close */
168 168
169/* FIXME: This _really_ needs to die. In 2.5, we should lock the 169/* Back in June 2001, dwmw2 wrote:
170 userspace buffer down and use it directly with readv/writev. 170 *
171*/ 171 * FIXME: This _really_ needs to die. In 2.5, we should lock the
172#define MAX_KMALLOC_SIZE 0x20000 172 * userspace buffer down and use it directly with readv/writev.
173 *
174 * The implementation below, using mtd_kmalloc_up_to, mitigates
175 * allocation failures when the system is under low-memory situations
176 * or if memory is highly fragmented at the cost of reducing the
177 * performance of the requested transfer due to a smaller buffer size.
178 *
179 * A more complex but more memory-efficient implementation based on
180 * get_user_pages and iovecs to cover extents of those pages is a
181 * longer-term goal, as intimated by dwmw2 above. However, for the
182 * write case, this requires yet more complex head and tail transfer
183 * handling when those head and tail offsets and sizes are such that
184 * alignment requirements are not met in the NAND subdriver.
185 */
173 186
174static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos) 187static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
175{ 188{
@@ -179,6 +192,7 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
179 size_t total_retlen=0; 192 size_t total_retlen=0;
180 int ret=0; 193 int ret=0;
181 int len; 194 int len;
195 size_t size = count;
182 char *kbuf; 196 char *kbuf;
183 197
184 DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n"); 198 DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
@@ -189,23 +203,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
189 if (!count) 203 if (!count)
190 return 0; 204 return 0;
191 205
192 /* FIXME: Use kiovec in 2.5 to lock down the user's buffers 206 kbuf = mtd_kmalloc_up_to(mtd, &size);
193 and pass them directly to the MTD functions */
194
195 if (count > MAX_KMALLOC_SIZE)
196 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
197 else
198 kbuf=kmalloc(count, GFP_KERNEL);
199
200 if (!kbuf) 207 if (!kbuf)
201 return -ENOMEM; 208 return -ENOMEM;
202 209
203 while (count) { 210 while (count) {
204 211 len = min_t(size_t, count, size);
205 if (count > MAX_KMALLOC_SIZE)
206 len = MAX_KMALLOC_SIZE;
207 else
208 len = count;
209 212
210 switch (mfi->mode) { 213 switch (mfi->mode) {
211 case MTD_MODE_OTP_FACTORY: 214 case MTD_MODE_OTP_FACTORY:
@@ -268,6 +271,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
268{ 271{
269 struct mtd_file_info *mfi = file->private_data; 272 struct mtd_file_info *mfi = file->private_data;
270 struct mtd_info *mtd = mfi->mtd; 273 struct mtd_info *mtd = mfi->mtd;
274 size_t size = count;
271 char *kbuf; 275 char *kbuf;
272 size_t retlen; 276 size_t retlen;
273 size_t total_retlen=0; 277 size_t total_retlen=0;
@@ -285,20 +289,12 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
285 if (!count) 289 if (!count)
286 return 0; 290 return 0;
287 291
288 if (count > MAX_KMALLOC_SIZE) 292 kbuf = mtd_kmalloc_up_to(mtd, &size);
289 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
290 else
291 kbuf=kmalloc(count, GFP_KERNEL);
292
293 if (!kbuf) 293 if (!kbuf)
294 return -ENOMEM; 294 return -ENOMEM;
295 295
296 while (count) { 296 while (count) {
297 297 len = min_t(size_t, count, size);
298 if (count > MAX_KMALLOC_SIZE)
299 len = MAX_KMALLOC_SIZE;
300 else
301 len = count;
302 298
303 if (copy_from_user(kbuf, buf, len)) { 299 if (copy_from_user(kbuf, buf, len)) {
304 kfree(kbuf); 300 kfree(kbuf);
@@ -512,7 +508,6 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
512 return 0; 508 return 0;
513} 509}
514 510
515#ifdef CONFIG_MTD_PARTITIONS
516static int mtd_blkpg_ioctl(struct mtd_info *mtd, 511static int mtd_blkpg_ioctl(struct mtd_info *mtd,
517 struct blkpg_ioctl_arg __user *arg) 512 struct blkpg_ioctl_arg __user *arg)
518{ 513{
@@ -548,8 +543,6 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
548 return -EINVAL; 543 return -EINVAL;
549 } 544 }
550} 545}
551#endif
552
553 546
554static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) 547static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
555{ 548{
@@ -941,7 +934,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
941 break; 934 break;
942 } 935 }
943 936
944#ifdef CONFIG_MTD_PARTITIONS
945 case BLKPG: 937 case BLKPG:
946 { 938 {
947 ret = mtd_blkpg_ioctl(mtd, 939 ret = mtd_blkpg_ioctl(mtd,
@@ -955,7 +947,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
955 ret = 0; 947 ret = 0;
956 break; 948 break;
957 } 949 }
958#endif
959 950
960 default: 951 default:
961 ret = -ENOTTY; 952 ret = -ENOTTY;
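
A rough sketch of the chunked-transfer pattern mtd_read()/mtd_write() adopt above: request an ideal-sized bounce buffer from mtd_kmalloc_up_to(), let it shrink under memory pressure, and chunk the copy by whatever size was actually granted. The example_copy_out() helper is hypothetical and calls the raw mtd->read hook of this era directly, skipping the OTP mode handling the real mtd_read() performs:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>
	#include <linux/mtd/mtd.h>

	static int example_copy_out(struct mtd_info *mtd, loff_t from,
				    size_t count, char __user *buf)
	{
		size_t size = count;	/* ideal allocation: the whole request */
		size_t retlen, len;
		unsigned char *kbuf;
		int ret = 0;

		kbuf = mtd_kmalloc_up_to(mtd, &size);	/* size may come back smaller */
		if (!kbuf)
			return -ENOMEM;

		while (count) {
			len = min_t(size_t, count, size);

			ret = mtd->read(mtd, from, len, &retlen, kbuf);
			if (ret)
				break;

			if (copy_to_user(buf, kbuf, retlen)) {
				ret = -EFAULT;
				break;
			}

			from += retlen;
			buf += retlen;
			count -= retlen;
		}

		kfree(kbuf);
		return ret;
	}
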
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 5060e608ea5d..e601672a5305 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -319,7 +319,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
319 if (!(mtd->flags & MTD_WRITEABLE)) 319 if (!(mtd->flags & MTD_WRITEABLE))
320 return -EROFS; 320 return -EROFS;
321 321
322 ops->retlen = 0; 322 ops->retlen = ops->oobretlen = 0;
323 323
324 for (i = 0; i < concat->num_subdev; i++) { 324 for (i = 0; i < concat->num_subdev; i++) {
325 struct mtd_info *subdev = concat->subdev[i]; 325 struct mtd_info *subdev = concat->subdev[i];
@@ -334,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
334 devops.len = subdev->size - to; 334 devops.len = subdev->size - to;
335 335
336 err = subdev->write_oob(subdev, to, &devops); 336 err = subdev->write_oob(subdev, to, &devops);
337 ops->retlen += devops.retlen; 337 ops->retlen += devops.oobretlen;
338 if (err) 338 if (err)
339 return err; 339 return err;
340 340
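
The one-line concat fix above hinges on which counter reports what in struct mtd_oob_ops: retlen counts data bytes, oobretlen counts OOB bytes, so an OOB-only write has to be accumulated through oobretlen. A small, hypothetical caller showing the two counters (assumes a valid mtd and offset):

	#include <linux/errno.h>
	#include <linux/mtd/mtd.h>

	static int example_write_oob(struct mtd_info *mtd, loff_t to,
				     unsigned char *oobbuf, size_t ooblen)
	{
		struct mtd_oob_ops ops = {
			.mode	= MTD_OOB_AUTO,	/* place data in the free OOB bytes */
			.ooblen	= ooblen,
			.oobbuf	= oobbuf,
			/* .datbuf left NULL: OOB-only, so ops.retlen stays 0 */
		};
		int err;

		err = mtd->write_oob(mtd, to, &ops);
		if (err)
			return err;

		/* ops.oobretlen now says how much OOB data actually went out. */
		return ops.oobretlen == ooblen ? 0 : -EIO;
	}
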
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index da69bc8a5a7d..c510aff289a8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/ptrace.h> 26#include <linux/ptrace.h>
27#include <linux/seq_file.h>
27#include <linux/string.h> 28#include <linux/string.h>
28#include <linux/timer.h> 29#include <linux/timer.h>
29#include <linux/major.h> 30#include <linux/major.h>
@@ -37,6 +38,7 @@
37#include <linux/gfp.h> 38#include <linux/gfp.h>
38 39
39#include <linux/mtd/mtd.h> 40#include <linux/mtd/mtd.h>
41#include <linux/mtd/partitions.h>
40 42
41#include "mtdcore.h" 43#include "mtdcore.h"
42/* 44/*
@@ -391,7 +393,7 @@ fail_locked:
391 * if the requested device does not appear to be present in the list. 393 * if the requested device does not appear to be present in the list.
392 */ 394 */
393 395
394int del_mtd_device (struct mtd_info *mtd) 396int del_mtd_device(struct mtd_info *mtd)
395{ 397{
396 int ret; 398 int ret;
397 struct mtd_notifier *not; 399 struct mtd_notifier *not;
@@ -427,6 +429,50 @@ out_error:
427} 429}
428 430
429/** 431/**
432 * mtd_device_register - register an MTD device.
433 *
434 * @master: the MTD device to register
435 * @parts: the partitions to register - only valid if nr_parts > 0
436 * @nr_parts: the number of partitions in parts. If zero then the full MTD
437 * device is registered
438 *
439 * Register an MTD device with the system and optionally, a number of
440 * partitions. If nr_parts is 0 then the whole device is registered, otherwise
441 * only the partitions are registered. To register both the full device *and*
442 * the partitions, call mtd_device_register() twice, once with nr_parts == 0
443 * and once equal to the number of partitions.
444 */
445int mtd_device_register(struct mtd_info *master,
446 const struct mtd_partition *parts,
447 int nr_parts)
448{
449 return parts ? add_mtd_partitions(master, parts, nr_parts) :
450 add_mtd_device(master);
451}
452EXPORT_SYMBOL_GPL(mtd_device_register);
453
454/**
455 * mtd_device_unregister - unregister an existing MTD device.
456 *
457 * @master: the MTD device to unregister. This will unregister both the master
458 * and any partitions if registered.
459 */
460int mtd_device_unregister(struct mtd_info *master)
461{
462 int err;
463
464 err = del_mtd_partitions(master);
465 if (err)
466 return err;
467
468 if (!device_is_registered(&master->dev))
469 return 0;
470
471 return del_mtd_device(master);
472}
473EXPORT_SYMBOL_GPL(mtd_device_unregister);
474
475/**
430 * register_mtd_user - register a 'user' of MTD devices. 476 * register_mtd_user - register a 'user' of MTD devices.
431 * @new: pointer to notifier info structure 477 * @new: pointer to notifier info structure
432 * 478 *
@@ -443,7 +489,7 @@ void register_mtd_user (struct mtd_notifier *new)
443 489
444 list_add(&new->list, &mtd_notifiers); 490 list_add(&new->list, &mtd_notifiers);
445 491
446 __module_get(THIS_MODULE); 492 __module_get(THIS_MODULE);
447 493
448 mtd_for_each_device(mtd) 494 mtd_for_each_device(mtd)
449 new->add(mtd); 495 new->add(mtd);
@@ -532,7 +578,6 @@ int __get_mtd_device(struct mtd_info *mtd)
532 return -ENODEV; 578 return -ENODEV;
533 579
534 if (mtd->get_device) { 580 if (mtd->get_device) {
535
536 err = mtd->get_device(mtd); 581 err = mtd->get_device(mtd);
537 582
538 if (err) { 583 if (err) {
@@ -570,21 +615,13 @@ struct mtd_info *get_mtd_device_nm(const char *name)
570 if (!mtd) 615 if (!mtd)
571 goto out_unlock; 616 goto out_unlock;
572 617
573 if (!try_module_get(mtd->owner)) 618 err = __get_mtd_device(mtd);
619 if (err)
574 goto out_unlock; 620 goto out_unlock;
575 621
576 if (mtd->get_device) {
577 err = mtd->get_device(mtd);
578 if (err)
579 goto out_put;
580 }
581
582 mtd->usecount++;
583 mutex_unlock(&mtd_table_mutex); 622 mutex_unlock(&mtd_table_mutex);
584 return mtd; 623 return mtd;
585 624
586out_put:
587 module_put(mtd->owner);
588out_unlock: 625out_unlock:
589 mutex_unlock(&mtd_table_mutex); 626 mutex_unlock(&mtd_table_mutex);
590 return ERR_PTR(err); 627 return ERR_PTR(err);
@@ -638,8 +675,54 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
638 return ret; 675 return ret;
639} 676}
640 677
641EXPORT_SYMBOL_GPL(add_mtd_device); 678/**
642EXPORT_SYMBOL_GPL(del_mtd_device); 679 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
680 * @size: A pointer to the ideal or maximum size of the allocation. Points
681 * to the actual allocation size on success.
682 *
683 * This routine attempts to allocate a contiguous kernel buffer up to
684 * the specified size, backing off the size of the request exponentially
685 * until the request succeeds or until the allocation size falls below
686 * the system page size. This attempts to make sure it does not adversely
687 * impact system performance, so when allocating more than one page, we
688 * ask the memory allocator to avoid re-trying, swapping, writing back
689 * or performing I/O.
690 *
691 * Note, this function also makes sure that the allocated buffer is aligned to
692 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
693 *
694 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
695 * to handle smaller (i.e. degraded) buffer allocations under low- or
696 * fragmented-memory situations where such reduced allocations, from a
697 * requested ideal, are allowed.
698 *
699 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
700 */
701void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
702{
703 gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
704 __GFP_NORETRY | __GFP_NO_KSWAPD;
705 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
706 void *kbuf;
707
708 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
709
710 while (*size > min_alloc) {
711 kbuf = kmalloc(*size, flags);
712 if (kbuf)
713 return kbuf;
714
715 *size >>= 1;
716 *size = ALIGN(*size, mtd->writesize);
717 }
718
719 /*
720 * For the last resort allocation allow 'kmalloc()' to do all sorts of
721 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
722 */
723 return kmalloc(*size, GFP_KERNEL);
724}
725
643EXPORT_SYMBOL_GPL(get_mtd_device); 726EXPORT_SYMBOL_GPL(get_mtd_device);
644EXPORT_SYMBOL_GPL(get_mtd_device_nm); 727EXPORT_SYMBOL_GPL(get_mtd_device_nm);
645EXPORT_SYMBOL_GPL(__get_mtd_device); 728EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -648,6 +731,7 @@ EXPORT_SYMBOL_GPL(__put_mtd_device);
648EXPORT_SYMBOL_GPL(register_mtd_user); 731EXPORT_SYMBOL_GPL(register_mtd_user);
649EXPORT_SYMBOL_GPL(unregister_mtd_user); 732EXPORT_SYMBOL_GPL(unregister_mtd_user);
650EXPORT_SYMBOL_GPL(default_mtd_writev); 733EXPORT_SYMBOL_GPL(default_mtd_writev);
734EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
651 735
652#ifdef CONFIG_PROC_FS 736#ifdef CONFIG_PROC_FS
653 737
@@ -656,44 +740,32 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
656 740
657static struct proc_dir_entry *proc_mtd; 741static struct proc_dir_entry *proc_mtd;
658 742
659static inline int mtd_proc_info(char *buf, struct mtd_info *this) 743static int mtd_proc_show(struct seq_file *m, void *v)
660{
661 return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
662 (unsigned long long)this->size,
663 this->erasesize, this->name);
664}
665
666static int mtd_read_proc (char *page, char **start, off_t off, int count,
667 int *eof, void *data_unused)
668{ 744{
669 struct mtd_info *mtd; 745 struct mtd_info *mtd;
670 int len, l;
671 off_t begin = 0;
672 746
747 seq_puts(m, "dev: size erasesize name\n");
673 mutex_lock(&mtd_table_mutex); 748 mutex_lock(&mtd_table_mutex);
674
675 len = sprintf(page, "dev: size erasesize name\n");
676 mtd_for_each_device(mtd) { 749 mtd_for_each_device(mtd) {
677 l = mtd_proc_info(page + len, mtd); 750 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
678 len += l; 751 mtd->index, (unsigned long long)mtd->size,
679 if (len+begin > off+count) 752 mtd->erasesize, mtd->name);
680 goto done; 753 }
681 if (len+begin < off) {
682 begin += len;
683 len = 0;
684 }
685 }
686
687 *eof = 1;
688
689done:
690 mutex_unlock(&mtd_table_mutex); 754 mutex_unlock(&mtd_table_mutex);
691 if (off >= len+begin) 755 return 0;
692 return 0; 756}
693 *start = page + (off-begin); 757
694 return ((count < begin+len-off) ? count : begin+len-off); 758static int mtd_proc_open(struct inode *inode, struct file *file)
759{
760 return single_open(file, mtd_proc_show, NULL);
695} 761}
696 762
763static const struct file_operations mtd_proc_ops = {
764 .open = mtd_proc_open,
765 .read = seq_read,
766 .llseek = seq_lseek,
767 .release = single_release,
768};
697#endif /* CONFIG_PROC_FS */ 769#endif /* CONFIG_PROC_FS */
698 770
699/*====================================================================*/ 771/*====================================================================*/
@@ -734,8 +806,7 @@ static int __init init_mtd(void)
734 goto err_bdi3; 806 goto err_bdi3;
735 807
736#ifdef CONFIG_PROC_FS 808#ifdef CONFIG_PROC_FS
737 if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) 809 proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
738 proc_mtd->read_proc = mtd_read_proc;
739#endif /* CONFIG_PROC_FS */ 810#endif /* CONFIG_PROC_FS */
740 return 0; 811 return 0;
741 812
@@ -753,7 +824,7 @@ err_reg:
753static void __exit cleanup_mtd(void) 824static void __exit cleanup_mtd(void)
754{ 825{
755#ifdef CONFIG_PROC_FS 826#ifdef CONFIG_PROC_FS
756 if (proc_mtd) 827 if (proc_mtd)
757 remove_proc_entry( "mtd", NULL); 828 remove_proc_entry( "mtd", NULL);
758#endif /* CONFIG_PROC_FS */ 829#endif /* CONFIG_PROC_FS */
759 class_unregister(&mtd_class); 830 class_unregister(&mtd_class);
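
One detail worth pulling out of the mtd_device_register() kerneldoc above: registering both the unpartitioned master and its partitions takes two calls, once with nr_parts == 0 and once with the real table. A sketch of that, assuming a probed master and a ready partition array (example_register_both() is illustrative, not part of the patch):

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>

	static int example_register_both(struct mtd_info *master,
					 const struct mtd_partition *parts,
					 int nr_parts)
	{
		int err;

		/* First expose the full, unpartitioned device... */
		err = mtd_device_register(master, NULL, 0);
		if (err)
			return err;

		/* ...then the partitions on top of it. */
		err = mtd_device_register(master, parts, nr_parts);
		if (err)
			mtd_device_unregister(master);	/* roll back the master */

		return err;
	}
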
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 6a64fdebc898..0ed6126b4c1f 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -10,6 +10,12 @@
10extern struct mutex mtd_table_mutex; 10extern struct mutex mtd_table_mutex;
11extern struct mtd_info *__mtd_next_device(int i); 11extern struct mtd_info *__mtd_next_device(int i);
12 12
13extern int add_mtd_device(struct mtd_info *mtd);
14extern int del_mtd_device(struct mtd_info *mtd);
15extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
16 int);
17extern int del_mtd_partitions(struct mtd_info *);
18
13#define mtd_for_each_device(mtd) \ 19#define mtd_for_each_device(mtd) \
14 for ((mtd) = __mtd_next_device(0); \ 20 for ((mtd) = __mtd_next_device(0); \
15 (mtd) != NULL; \ 21 (mtd) != NULL; \
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 0a4760174782..630be3e7da04 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -31,6 +31,8 @@
31#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
32#include <linux/err.h> 32#include <linux/err.h>
33 33
34#include "mtdcore.h"
35
34/* Our partition linked list */ 36/* Our partition linked list */
35static LIST_HEAD(mtd_partitions); 37static LIST_HEAD(mtd_partitions);
36static DEFINE_MUTEX(mtd_partitions_mutex); 38static DEFINE_MUTEX(mtd_partitions_mutex);
@@ -376,7 +378,6 @@ int del_mtd_partitions(struct mtd_info *master)
376 378
377 return err; 379 return err;
378} 380}
379EXPORT_SYMBOL(del_mtd_partitions);
380 381
381static struct mtd_part *allocate_partition(struct mtd_info *master, 382static struct mtd_part *allocate_partition(struct mtd_info *master,
382 const struct mtd_partition *part, int partno, 383 const struct mtd_partition *part, int partno,
@@ -671,7 +672,6 @@ int add_mtd_partitions(struct mtd_info *master,
671 672
672 return 0; 673 return 0;
673} 674}
674EXPORT_SYMBOL(add_mtd_partitions);
675 675
676static DEFINE_SPINLOCK(part_parser_lock); 676static DEFINE_SPINLOCK(part_parser_lock);
677static LIST_HEAD(part_parsers); 677static LIST_HEAD(part_parsers);
@@ -722,11 +722,8 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
722 parser = get_partition_parser(*types); 722 parser = get_partition_parser(*types);
723 if (!parser && !request_module("%s", *types)) 723 if (!parser && !request_module("%s", *types))
724 parser = get_partition_parser(*types); 724 parser = get_partition_parser(*types);
725 if (!parser) { 725 if (!parser)
726 printk(KERN_NOTICE "%s partition parsing not available\n",
727 *types);
728 continue; 726 continue;
729 }
730 ret = (*parser->parse_fn)(master, pparts, origin); 727 ret = (*parser->parse_fn)(master, pparts, origin);
731 if (ret > 0) { 728 if (ret > 0) {
732 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", 729 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
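
The parse_mtd_partitions() loop above looks each name in types[] up among the registered parsers, loading a module on demand, and now silently skips names that never resolve. For reference, a minimal parser that such a lookup would find looks roughly like this; "examplepart" and the example_* names are hypothetical:

	#include <linux/module.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>

	static int example_parse_fn(struct mtd_info *master,
				    struct mtd_partition **pparts,
				    unsigned long origin)
	{
		/* A real parser allocates *pparts and returns how many it found. */
		return 0;	/* nothing found; the loop moves on to the next type */
	}

	static struct mtd_part_parser example_parser = {
		.owner		= THIS_MODULE,
		.parse_fn	= example_parse_fn,
		.name		= "examplepart",  /* matched against the types[] strings */
	};

	static int __init example_parser_init(void)
	{
		return register_mtd_parser(&example_parser);
	}
	module_init(example_parser_init);

	static void __exit example_parser_exit(void)
	{
		deregister_mtd_parser(&example_parser);
	}
	module_exit(example_parser_exit);
	MODULE_LICENSE("GPL");
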
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fed215c4cfa1..fd7885327611 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1450,7 +1450,13 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1450 } 1450 }
1451 1451
1452 oinfo = mtd->ecclayout; 1452 oinfo = mtd->ecclayout;
1453 if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) { 1453 if (!oinfo) {
1454 printk(KERN_ERR "%s: mtd%d does not have OOB\n",
1455 MTDSWAP_PREFIX, mtd->index);
1456 return;
1457 }
1458
1459 if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
1454 printk(KERN_ERR "%s: Not enough free bytes in OOB, " 1460 printk(KERN_ERR "%s: Not enough free bytes in OOB, "
1455 "%d available, %zu needed.\n", 1461 "%d available, %zu needed.\n",
1456 MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE); 1462 MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index edec457d361d..4c3425235adc 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -92,7 +92,7 @@ config MTD_NAND_EDB7312
92 92
93config MTD_NAND_H1900 93config MTD_NAND_H1900
94 tristate "iPAQ H1900 flash" 94 tristate "iPAQ H1900 flash"
95 depends on ARCH_PXA && MTD_PARTITIONS 95 depends on ARCH_PXA
96 help 96 help
97 This enables the driver for the iPAQ h1900 flash. 97 This enables the driver for the iPAQ h1900 flash.
98 98
@@ -419,7 +419,6 @@ config MTD_NAND_TMIO
419 419
420config MTD_NAND_NANDSIM 420config MTD_NAND_NANDSIM
421 tristate "Support for NAND Flash Simulator" 421 tristate "Support for NAND Flash Simulator"
422 depends on MTD_PARTITIONS
423 help 422 help
424 The simulator may simulate various NAND flash chips for the 423 The simulator may simulate various NAND flash chips for the
425 MTD nand layer. 424 MTD nand layer.
@@ -513,7 +512,7 @@ config MTD_NAND_SOCRATES
513 512
514config MTD_NAND_NUC900 513config MTD_NAND_NUC900
515 tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards." 514 tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
516 depends on ARCH_W90X900 && MTD_PARTITIONS 515 depends on ARCH_W90X900
517 help 516 help
518 This enables the driver for the NAND Flash on evaluation board based 517 This enables the driver for the NAND Flash on evaluation board based
519 on w90p910 / NUC9xx. 518 on w90p910 / NUC9xx.
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 8691e0482ed2..eb40ea829ab2 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -120,7 +120,7 @@ static void alauda_delete(struct kref *kref)
120 struct alauda *al = container_of(kref, struct alauda, kref); 120 struct alauda *al = container_of(kref, struct alauda, kref);
121 121
122 if (al->mtd) { 122 if (al->mtd) {
123 del_mtd_device(al->mtd); 123 mtd_device_unregister(al->mtd);
124 kfree(al->mtd); 124 kfree(al->mtd);
125 } 125 }
126 usb_put_dev(al->dev); 126 usb_put_dev(al->dev);
@@ -592,7 +592,7 @@ static int alauda_init_media(struct alauda *al)
592 mtd->priv = al; 592 mtd->priv = al;
593 mtd->owner = THIS_MODULE; 593 mtd->owner = THIS_MODULE;
594 594
595 err = add_mtd_device(mtd); 595 err = mtd_device_register(mtd, NULL, 0);
596 if (err) { 596 if (err) {
597 err = -ENFILE; 597 err = -ENFILE;
598 goto error; 598 goto error;
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index bc65bf71e1a2..78017eb9318e 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -235,8 +235,8 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
235 } 235 }
236 236
237 /* Register the partitions */ 237 /* Register the partitions */
238 add_mtd_partitions(ams_delta_mtd, partition_info, 238 mtd_device_register(ams_delta_mtd, partition_info,
239 ARRAY_SIZE(partition_info)); 239 ARRAY_SIZE(partition_info));
240 240
241 goto out; 241 goto out;
242 242
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 950646aa4c4b..b300705d41cb 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -30,6 +30,7 @@
30#include <linux/mtd/nand.h> 30#include <linux/mtd/nand.h>
31#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
32 32
33#include <linux/dmaengine.h>
33#include <linux/gpio.h> 34#include <linux/gpio.h>
34#include <linux/io.h> 35#include <linux/io.h>
35 36
@@ -494,11 +495,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
494 struct resource *regs; 495 struct resource *regs;
495 struct resource *mem; 496 struct resource *mem;
496 int res; 497 int res;
497
498#ifdef CONFIG_MTD_PARTITIONS
499 struct mtd_partition *partitions = NULL; 498 struct mtd_partition *partitions = NULL;
500 int num_partitions = 0; 499 int num_partitions = 0;
501#endif
502 500
503 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 501 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
504 if (!mem) { 502 if (!mem) {
@@ -656,7 +654,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
656 goto err_scan_tail; 654 goto err_scan_tail;
657 } 655 }
658 656
659#ifdef CONFIG_MTD_PARTITIONS
660#ifdef CONFIG_MTD_CMDLINE_PARTS 657#ifdef CONFIG_MTD_CMDLINE_PARTS
661 mtd->name = "atmel_nand"; 658 mtd->name = "atmel_nand";
662 num_partitions = parse_mtd_partitions(mtd, part_probes, 659 num_partitions = parse_mtd_partitions(mtd, part_probes,
@@ -672,17 +669,11 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
672 goto err_no_partitions; 669 goto err_no_partitions;
673 } 670 }
674 671
675 res = add_mtd_partitions(mtd, partitions, num_partitions); 672 res = mtd_device_register(mtd, partitions, num_partitions);
676#else
677 res = add_mtd_device(mtd);
678#endif
679
680 if (!res) 673 if (!res)
681 return res; 674 return res;
682 675
683#ifdef CONFIG_MTD_PARTITIONS
684err_no_partitions: 676err_no_partitions:
685#endif
686 nand_release(mtd); 677 nand_release(mtd);
687err_scan_tail: 678err_scan_tail:
688err_scan_ident: 679err_scan_ident:
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 5d513b54a7d7..e7767eef4505 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -581,7 +581,8 @@ static int __init au1xxx_nand_init(void)
581 } 581 }
582 582
583 /* Register the partitions */ 583 /* Register the partitions */
584 add_mtd_partitions(au1550_mtd, partition_info, ARRAY_SIZE(partition_info)); 584 mtd_device_register(au1550_mtd, partition_info,
585 ARRAY_SIZE(partition_info));
585 586
586 return 0; 587 return 0;
587 588
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index 0911cf03db80..eddc9a224985 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -185,20 +185,20 @@ static int __init autcpu12_init(void)
185 /* Register the partitions */ 185 /* Register the partitions */
186 switch (autcpu12_mtd->size) { 186 switch (autcpu12_mtd->size) {
187 case SZ_16M: 187 case SZ_16M:
188 add_mtd_partitions(autcpu12_mtd, partition_info16k, 188 mtd_device_register(autcpu12_mtd, partition_info16k,
189 NUM_PARTITIONS16K); 189 NUM_PARTITIONS16K);
190 break; 190 break;
191 case SZ_32M: 191 case SZ_32M:
192 add_mtd_partitions(autcpu12_mtd, partition_info32k, 192 mtd_device_register(autcpu12_mtd, partition_info32k,
193 NUM_PARTITIONS32K); 193 NUM_PARTITIONS32K);
194 break; 194 break;
195 case SZ_64M: 195 case SZ_64M:
196 add_mtd_partitions(autcpu12_mtd, partition_info64k, 196 mtd_device_register(autcpu12_mtd, partition_info64k,
197 NUM_PARTITIONS64K); 197 NUM_PARTITIONS64K);
198 break; 198 break;
199 case SZ_128M: 199 case SZ_128M:
200 add_mtd_partitions(autcpu12_mtd, partition_info128k, 200 mtd_device_register(autcpu12_mtd, partition_info128k,
201 NUM_PARTITIONS128K); 201 NUM_PARTITIONS128K);
202 break; 202 break;
203 default: 203 default:
204 printk("Unsupported SmartMedia device\n"); 204 printk("Unsupported SmartMedia device\n");
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index dfe262c726fb..9ec280738a9a 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -52,9 +52,7 @@
52static const __devinitconst char gBanner[] = KERN_INFO \ 52static const __devinitconst char gBanner[] = KERN_INFO \
53 "BCM UMI MTD NAND Driver: 1.00\n"; 53 "BCM UMI MTD NAND Driver: 1.00\n";
54 54
55#ifdef CONFIG_MTD_PARTITIONS
56const char *part_probes[] = { "cmdlinepart", NULL }; 55const char *part_probes[] = { "cmdlinepart", NULL };
57#endif
58 56
59#if NAND_ECC_BCH 57#if NAND_ECC_BCH
60static uint8_t scan_ff_pattern[] = { 0xff }; 58static uint8_t scan_ff_pattern[] = { 0xff };
@@ -509,7 +507,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
509 kfree(board_mtd); 507 kfree(board_mtd);
510 return -EIO; 508 return -EIO;
511 } 509 }
512 add_mtd_partitions(board_mtd, partition_info, nr_partitions); 510 mtd_device_register(board_mtd, partition_info, nr_partitions);
513 } 511 }
514 512
515 /* Return happy */ 513 /* Return happy */
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 79947bea4d57..dd899cb5d366 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -659,15 +659,10 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
659static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info) 659static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
660{ 660{
661 struct mtd_info *mtd = &info->mtd; 661 struct mtd_info *mtd = &info->mtd;
662
663#ifdef CONFIG_MTD_PARTITIONS
664 struct mtd_partition *parts = info->platform->partitions; 662 struct mtd_partition *parts = info->platform->partitions;
665 int nr = info->platform->nr_partitions; 663 int nr = info->platform->nr_partitions;
666 664
667 return add_mtd_partitions(mtd, parts, nr); 665 return mtd_device_register(mtd, parts, nr);
668#else
669 return add_mtd_device(mtd);
670#endif
671} 666}
672 667
673static int __devexit bf5xx_nand_remove(struct platform_device *pdev) 668static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e06c8983978e..87ebb4e5b0c3 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -90,9 +90,7 @@ static unsigned int numtimings;
90static int timing[3]; 90static int timing[3];
91module_param_array(timing, int, &numtimings, 0644); 91module_param_array(timing, int, &numtimings, 0644);
92 92
93#ifdef CONFIG_MTD_PARTITIONS
94static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; 93static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
95#endif
96 94
97/* Hrm. Why isn't this already conditional on something in the struct device? */ 95/* Hrm. Why isn't this already conditional on something in the struct device? */
98#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0) 96#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
@@ -632,10 +630,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
632 struct cafe_priv *cafe; 630 struct cafe_priv *cafe;
633 uint32_t ctrl; 631 uint32_t ctrl;
634 int err = 0; 632 int err = 0;
635#ifdef CONFIG_MTD_PARTITIONS
636 struct mtd_partition *parts; 633 struct mtd_partition *parts;
637 int nr_parts; 634 int nr_parts;
638#endif
639 635
640 /* Very old versions shared the same PCI ident for all three 636 /* Very old versions shared the same PCI ident for all three
641 functions on the chip. Verify the class too... */ 637 functions on the chip. Verify the class too... */
@@ -804,9 +800,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
804 pci_set_drvdata(pdev, mtd); 800 pci_set_drvdata(pdev, mtd);
805 801
806 /* We register the whole device first, separate from the partitions */ 802 /* We register the whole device first, separate from the partitions */
807 add_mtd_device(mtd); 803 mtd_device_register(mtd, NULL, 0);
808 804
809#ifdef CONFIG_MTD_PARTITIONS
810#ifdef CONFIG_MTD_CMDLINE_PARTS 805#ifdef CONFIG_MTD_CMDLINE_PARTS
811 mtd->name = "cafe_nand"; 806 mtd->name = "cafe_nand";
812#endif 807#endif
@@ -814,9 +809,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
814 if (nr_parts > 0) { 809 if (nr_parts > 0) {
815 cafe->parts = parts; 810 cafe->parts = parts;
816 dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts); 811 dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
817 add_mtd_partitions(mtd, parts, nr_parts); 812 mtd_device_register(mtd, parts, nr_parts);
818 } 813 }
819#endif
820 goto out; 814 goto out;
821 815
822 out_irq: 816 out_irq:
@@ -838,7 +832,6 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
838 struct mtd_info *mtd = pci_get_drvdata(pdev); 832 struct mtd_info *mtd = pci_get_drvdata(pdev);
839 struct cafe_priv *cafe = mtd->priv; 833 struct cafe_priv *cafe = mtd->priv;
840 834
841 del_mtd_device(mtd);
842 /* Disable NAND IRQ in global IRQ mask register */ 835 /* Disable NAND IRQ in global IRQ mask register */
843 cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); 836 cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
844 free_irq(pdev->irq, mtd); 837 free_irq(pdev->irq, mtd);
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 6e6495278258..6fc043a30d1e 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -238,7 +238,7 @@ static int __init cmx270_init(void)
238 238
239 /* Register the partitions */ 239 /* Register the partitions */
240 pr_notice("Using %s partition definition\n", part_type); 240 pr_notice("Using %s partition definition\n", part_type);
241 ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); 241 ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
242 if (ret) 242 if (ret)
243 goto err_scan; 243 goto err_scan;
244 244
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 71c35a0b9826..f59ad1f2d5db 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -277,22 +277,15 @@ static int is_geode(void)
277 return 0; 277 return 0;
278} 278}
279 279
280
281#ifdef CONFIG_MTD_PARTITIONS
282static const char *part_probes[] = { "cmdlinepart", NULL }; 280static const char *part_probes[] = { "cmdlinepart", NULL };
283#endif
284
285 281
286static int __init cs553x_init(void) 282static int __init cs553x_init(void)
287{ 283{
288 int err = -ENXIO; 284 int err = -ENXIO;
289 int i; 285 int i;
290 uint64_t val; 286 uint64_t val;
291
292#ifdef CONFIG_MTD_PARTITIONS
293 int mtd_parts_nb = 0; 287 int mtd_parts_nb = 0;
294 struct mtd_partition *mtd_parts = NULL; 288 struct mtd_partition *mtd_parts = NULL;
295#endif
296 289
297 /* If the CPU isn't a Geode GX or LX, abort */ 290 /* If the CPU isn't a Geode GX or LX, abort */
298 if (!is_geode()) 291 if (!is_geode())
@@ -324,17 +317,11 @@ static int __init cs553x_init(void)
324 if (cs553x_mtd[i]) { 317 if (cs553x_mtd[i]) {
325 318
326 /* If any devices registered, return success. Else the last error. */ 319 /* If any devices registered, return success. Else the last error. */
327#ifdef CONFIG_MTD_PARTITIONS
328 mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0); 320 mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0);
329 if (mtd_parts_nb > 0) { 321 if (mtd_parts_nb > 0)
330 printk(KERN_NOTICE "Using command line partition definition\n"); 322 printk(KERN_NOTICE "Using command line partition definition\n");
331 add_mtd_partitions(cs553x_mtd[i], mtd_parts, mtd_parts_nb); 323 mtd_device_register(cs553x_mtd[i], mtd_parts,
332 } else { 324 mtd_parts_nb);
333 add_mtd_device(cs553x_mtd[i]);
334 }
335#else
336 add_mtd_device(cs553x_mtd[i]);
337#endif
338 err = 0; 325 err = 0;
339 } 326 }
340 } 327 }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index aff3468867ac..1f34951ae1a7 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -530,6 +530,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
530 int ret; 530 int ret;
531 uint32_t val; 531 uint32_t val;
532 nand_ecc_modes_t ecc_mode; 532 nand_ecc_modes_t ecc_mode;
533 struct mtd_partition *mtd_parts = NULL;
534 int mtd_parts_nb = 0;
533 535
534 /* insist on board-specific configuration */ 536 /* insist on board-specific configuration */
535 if (!pdata) 537 if (!pdata)
@@ -749,41 +751,33 @@ syndrome_done:
749 if (ret < 0) 751 if (ret < 0)
750 goto err_scan; 752 goto err_scan;
751 753
752 if (mtd_has_partitions()) { 754 if (mtd_has_cmdlinepart()) {
753 struct mtd_partition *mtd_parts = NULL; 755 static const char *probes[] __initconst = {
754 int mtd_parts_nb = 0; 756 "cmdlinepart", NULL
757 };
755 758
756 if (mtd_has_cmdlinepart()) { 759 mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
757 static const char *probes[] __initconst = 760 &mtd_parts, 0);
758 { "cmdlinepart", NULL }; 761 }
759
760 mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
761 &mtd_parts, 0);
762 }
763
764 if (mtd_parts_nb <= 0) {
765 mtd_parts = pdata->parts;
766 mtd_parts_nb = pdata->nr_parts;
767 }
768 762
769 /* Register any partitions */ 763 if (mtd_parts_nb <= 0) {
770 if (mtd_parts_nb > 0) { 764 mtd_parts = pdata->parts;
771 ret = add_mtd_partitions(&info->mtd, 765 mtd_parts_nb = pdata->nr_parts;
772 mtd_parts, mtd_parts_nb); 766 }
773 if (ret == 0)
774 info->partitioned = true;
775 }
776 767
777 } else if (pdata->nr_parts) { 768 /* Register any partitions */
778 dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n", 769 if (mtd_parts_nb > 0) {
779 pdata->nr_parts, info->mtd.name); 770 ret = mtd_device_register(&info->mtd, mtd_parts,
771 mtd_parts_nb);
772 if (ret == 0)
773 info->partitioned = true;
780 } 774 }
781 775
782 /* If there's no partition info, just package the whole chip 776 /* If there's no partition info, just package the whole chip
783 * as a single MTD device. 777 * as a single MTD device.
784 */ 778 */
785 if (!info->partitioned) 779 if (!info->partitioned)
786 ret = add_mtd_device(&info->mtd) ? -ENODEV : 0; 780 ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
787 781
788 if (ret < 0) 782 if (ret < 0)
789 goto err_scan; 783 goto err_scan;
@@ -824,10 +818,7 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
824 struct davinci_nand_info *info = platform_get_drvdata(pdev); 818 struct davinci_nand_info *info = platform_get_drvdata(pdev);
825 int status; 819 int status;
826 820
827 if (mtd_has_partitions() && info->partitioned) 821 status = mtd_device_unregister(&info->mtd);
828 status = del_mtd_partitions(&info->mtd);
829 else
830 status = del_mtd_device(&info->mtd);
831 822
832 spin_lock_irq(&davinci_nand_lock); 823 spin_lock_irq(&davinci_nand_lock);
833 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) 824 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
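
The davinci hunks above fold the old branching (command-line partitions, board-supplied partitions, bare chip) into one registration call. A minimal sketch of that flow, with hypothetical names and the parse_mtd_partitions() signature as it appears in this diff:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/*
 * Illustrative helper, not code from this patch: prefer partitions parsed
 * from the kernel command line, fall back to the table supplied by the
 * board code, and let mtd_device_register() register the bare chip when
 * neither source provides any partitions.
 */
static int example_register_partitions(struct mtd_info *mtd,
				       struct mtd_partition *board_parts,
				       int board_nr_parts)
{
	static const char *probes[] = { "cmdlinepart", NULL };
	struct mtd_partition *parts = NULL;
	int nr_parts;

	nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
	if (nr_parts <= 0) {			/* nothing on the command line */
		parts = board_parts;
		nr_parts = board_nr_parts;
	}

	/* nr_parts == 0 simply registers the whole chip as one device */
	return mtd_device_register(mtd, parts, nr_parts);
}
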
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 4633f094c510..d5276218945f 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/dma-mapping.h>
22#include <linux/wait.h> 23#include <linux/wait.h>
23#include <linux/mutex.h> 24#include <linux/mutex.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
@@ -44,16 +45,16 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
44 45
45/* We define a macro here that combines all interrupts this driver uses into 46/* We define a macro here that combines all interrupts this driver uses into
46 * a single constant value, for convenience. */ 47 * a single constant value, for convenience. */
47#define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \ 48#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \
48 INTR_STATUS0__ECC_TRANSACTION_DONE | \ 49 INTR_STATUS__ECC_TRANSACTION_DONE | \
49 INTR_STATUS0__ECC_ERR | \ 50 INTR_STATUS__ECC_ERR | \
50 INTR_STATUS0__PROGRAM_FAIL | \ 51 INTR_STATUS__PROGRAM_FAIL | \
51 INTR_STATUS0__LOAD_COMP | \ 52 INTR_STATUS__LOAD_COMP | \
52 INTR_STATUS0__PROGRAM_COMP | \ 53 INTR_STATUS__PROGRAM_COMP | \
53 INTR_STATUS0__TIME_OUT | \ 54 INTR_STATUS__TIME_OUT | \
54 INTR_STATUS0__ERASE_FAIL | \ 55 INTR_STATUS__ERASE_FAIL | \
55 INTR_STATUS0__RST_COMP | \ 56 INTR_STATUS__RST_COMP | \
56 INTR_STATUS0__ERASE_COMP) 57 INTR_STATUS__ERASE_COMP)
57 58
58/* indicates whether or not the internal value for the flash bank is 59/* indicates whether or not the internal value for the flash bank is
59 * valid or not */ 60 * valid or not */
@@ -95,30 +96,6 @@ static const struct pci_device_id denali_pci_ids[] = {
95 { /* end: all zeroes */ } 96 { /* end: all zeroes */ }
96}; 97};
97 98
98
99/* these are static lookup tables that give us easy access to
100 * registers in the NAND controller.
101 */
102static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
103 INTR_STATUS1,
104 INTR_STATUS2,
105 INTR_STATUS3};
106
107static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
108 DEVICE_RESET__BANK1,
109 DEVICE_RESET__BANK2,
110 DEVICE_RESET__BANK3};
111
112static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
113 INTR_STATUS1__TIME_OUT,
114 INTR_STATUS2__TIME_OUT,
115 INTR_STATUS3__TIME_OUT};
116
117static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
118 INTR_STATUS1__RST_COMP,
119 INTR_STATUS2__RST_COMP,
120 INTR_STATUS3__RST_COMP};
121
122/* forward declarations */ 99/* forward declarations */
123static void clear_interrupts(struct denali_nand_info *denali); 100static void clear_interrupts(struct denali_nand_info *denali);
124static uint32_t wait_for_irq(struct denali_nand_info *denali, 101static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -180,19 +157,17 @@ static void read_status(struct denali_nand_info *denali)
180static void reset_bank(struct denali_nand_info *denali) 157static void reset_bank(struct denali_nand_info *denali)
181{ 158{
182 uint32_t irq_status = 0; 159 uint32_t irq_status = 0;
183 uint32_t irq_mask = reset_complete[denali->flash_bank] | 160 uint32_t irq_mask = INTR_STATUS__RST_COMP |
184 operation_timeout[denali->flash_bank]; 161 INTR_STATUS__TIME_OUT;
185 int bank = 0;
186 162
187 clear_interrupts(denali); 163 clear_interrupts(denali);
188 164
189 bank = device_reset_banks[denali->flash_bank]; 165 iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
190 iowrite32(bank, denali->flash_reg + DEVICE_RESET);
191 166
192 irq_status = wait_for_irq(denali, irq_mask); 167 irq_status = wait_for_irq(denali, irq_mask);
193 168
194 if (irq_status & operation_timeout[denali->flash_bank]) 169 if (irq_status & INTR_STATUS__TIME_OUT)
195 dev_err(&denali->dev->dev, "reset bank failed.\n"); 170 dev_err(denali->dev, "reset bank failed.\n");
196} 171}
197 172
198/* Reset the flash controller */ 173/* Reset the flash controller */
@@ -200,29 +175,28 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali)
200{ 175{
201 uint32_t i; 176 uint32_t i;
202 177
203 dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n", 178 dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
204 __FILE__, __LINE__, __func__); 179 __FILE__, __LINE__, __func__);
205 180
206 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) 181 for (i = 0 ; i < denali->max_banks; i++)
207 iowrite32(reset_complete[i] | operation_timeout[i], 182 iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
208 denali->flash_reg + intr_status_addresses[i]); 183 denali->flash_reg + INTR_STATUS(i));
209 184
210 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) { 185 for (i = 0 ; i < denali->max_banks; i++) {
211 iowrite32(device_reset_banks[i], 186 iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
212 denali->flash_reg + DEVICE_RESET);
213 while (!(ioread32(denali->flash_reg + 187 while (!(ioread32(denali->flash_reg +
214 intr_status_addresses[i]) & 188 INTR_STATUS(i)) &
215 (reset_complete[i] | operation_timeout[i]))) 189 (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
216 cpu_relax(); 190 cpu_relax();
217 if (ioread32(denali->flash_reg + intr_status_addresses[i]) & 191 if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
218 operation_timeout[i]) 192 INTR_STATUS__TIME_OUT)
219 dev_dbg(&denali->dev->dev, 193 dev_dbg(denali->dev,
220 "NAND Reset operation timed out on bank %d\n", i); 194 "NAND Reset operation timed out on bank %d\n", i);
221 } 195 }
222 196
223 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) 197 for (i = 0; i < denali->max_banks; i++)
224 iowrite32(reset_complete[i] | operation_timeout[i], 198 iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
225 denali->flash_reg + intr_status_addresses[i]); 199 denali->flash_reg + INTR_STATUS(i));
226 200
227 return PASS; 201 return PASS;
228} 202}
@@ -254,7 +228,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
254 uint16_t acc_clks; 228 uint16_t acc_clks;
255 uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt; 229 uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
256 230
257 dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n", 231 dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
258 __FILE__, __LINE__, __func__); 232 __FILE__, __LINE__, __func__);
259 233
260 en_lo = CEIL_DIV(Trp[mode], CLK_X); 234 en_lo = CEIL_DIV(Trp[mode], CLK_X);
@@ -291,7 +265,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
291 acc_clks++; 265 acc_clks++;
292 266
293 if ((data_invalid - acc_clks * CLK_X) < 2) 267 if ((data_invalid - acc_clks * CLK_X) < 2)
294 dev_warn(&denali->dev->dev, "%s, Line %d: Warning!\n", 268 dev_warn(denali->dev, "%s, Line %d: Warning!\n",
295 __FILE__, __LINE__); 269 __FILE__, __LINE__);
296 270
297 addr_2_data = CEIL_DIV(Tadl[mode], CLK_X); 271 addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
@@ -419,7 +393,7 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
419#endif 393#endif
420 break; 394 break;
421 default: 395 default:
422 dev_warn(&denali->dev->dev, 396 dev_warn(denali->dev,
423 "Spectra: Unknown Hynix NAND (Device ID: 0x%x)." 397 "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
424 "Will use default parameter values instead.\n", 398 "Will use default parameter values instead.\n",
425 device_id); 399 device_id);
@@ -431,17 +405,17 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
431 */ 405 */
432static void find_valid_banks(struct denali_nand_info *denali) 406static void find_valid_banks(struct denali_nand_info *denali)
433{ 407{
434 uint32_t id[LLD_MAX_FLASH_BANKS]; 408 uint32_t id[denali->max_banks];
435 int i; 409 int i;
436 410
437 denali->total_used_banks = 1; 411 denali->total_used_banks = 1;
438 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) { 412 for (i = 0; i < denali->max_banks; i++) {
439 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90); 413 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
440 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0); 414 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
441 index_addr_read_data(denali, 415 index_addr_read_data(denali,
442 (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]); 416 (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
443 417
444 dev_dbg(&denali->dev->dev, 418 dev_dbg(denali->dev,
445 "Return 1st ID for bank[%d]: %x\n", i, id[i]); 419 "Return 1st ID for bank[%d]: %x\n", i, id[i]);
446 420
447 if (i == 0) { 421 if (i == 0) {
@@ -461,16 +435,27 @@ static void find_valid_banks(struct denali_nand_info *denali)
461 * Multichip support is not enabled. 435 * Multichip support is not enabled.
462 */ 436 */
463 if (denali->total_used_banks != 1) { 437 if (denali->total_used_banks != 1) {
464 dev_err(&denali->dev->dev, 438 dev_err(denali->dev,
465 "Sorry, Intel CE4100 only supports " 439 "Sorry, Intel CE4100 only supports "
466 "a single NAND device.\n"); 440 "a single NAND device.\n");
467 BUG(); 441 BUG();
468 } 442 }
469 } 443 }
470 dev_dbg(&denali->dev->dev, 444 dev_dbg(denali->dev,
471 "denali->total_used_banks: %d\n", denali->total_used_banks); 445 "denali->total_used_banks: %d\n", denali->total_used_banks);
472} 446}
473 447
448/*
449 * Use the configuration feature register to determine the maximum number of
450 * banks that the hardware supports.
451 */
452static void detect_max_banks(struct denali_nand_info *denali)
453{
454 uint32_t features = ioread32(denali->flash_reg + FEATURES);
455
456 denali->max_banks = 2 << (features & FEATURES__N_BANKS);
457}
458
474static void detect_partition_feature(struct denali_nand_info *denali) 459static void detect_partition_feature(struct denali_nand_info *denali)
475{ 460{
476 /* For MRST platform, denali->fwblks represent the 461 /* For MRST platform, denali->fwblks represent the
@@ -480,15 +465,15 @@ static void detect_partition_feature(struct denali_nand_info *denali)
480 * blocks it can't touch. 465 * blocks it can't touch.
481 * */ 466 * */
482 if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) { 467 if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
483 if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) & 468 if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
484 PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) { 469 PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
485 denali->fwblks = 470 denali->fwblks =
486 ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) & 471 ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
487 MIN_MAX_BANK_1__MIN_VALUE) * 472 MIN_MAX_BANK__MIN_VALUE) *
488 denali->blksperchip) 473 denali->blksperchip)
489 + 474 +
490 (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) & 475 (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
491 MIN_BLK_ADDR_1__VALUE); 476 MIN_BLK_ADDR__VALUE);
492 } else 477 } else
493 denali->fwblks = SPECTRA_START_BLOCK; 478 denali->fwblks = SPECTRA_START_BLOCK;
494 } else 479 } else
@@ -501,7 +486,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
501 uint32_t id_bytes[5], addr; 486 uint32_t id_bytes[5], addr;
502 uint8_t i, maf_id, device_id; 487 uint8_t i, maf_id, device_id;
503 488
504 dev_dbg(&denali->dev->dev, 489 dev_dbg(denali->dev,
505 "%s, Line %d, Function: %s\n", 490 "%s, Line %d, Function: %s\n",
506 __FILE__, __LINE__, __func__); 491 __FILE__, __LINE__, __func__);
507 492
@@ -530,7 +515,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
530 get_hynix_nand_para(denali, device_id); 515 get_hynix_nand_para(denali, device_id);
531 } 516 }
532 517
533 dev_info(&denali->dev->dev, 518 dev_info(denali->dev,
534 "Dump timing register values:" 519 "Dump timing register values:"
535 "acc_clks: %d, re_2_we: %d, re_2_re: %d\n" 520 "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
536 "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n" 521 "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
@@ -560,7 +545,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
560static void denali_set_intr_modes(struct denali_nand_info *denali, 545static void denali_set_intr_modes(struct denali_nand_info *denali,
561 uint16_t INT_ENABLE) 546 uint16_t INT_ENABLE)
562{ 547{
563 dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n", 548 dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
564 __FILE__, __LINE__, __func__); 549 __FILE__, __LINE__, __func__);
565 550
566 if (INT_ENABLE) 551 if (INT_ENABLE)
@@ -580,6 +565,7 @@ static inline bool is_flash_bank_valid(int flash_bank)
580static void denali_irq_init(struct denali_nand_info *denali) 565static void denali_irq_init(struct denali_nand_info *denali)
581{ 566{
582 uint32_t int_mask = 0; 567 uint32_t int_mask = 0;
568 int i;
583 569
584 /* Disable global interrupts */ 570 /* Disable global interrupts */
585 denali_set_intr_modes(denali, false); 571 denali_set_intr_modes(denali, false);
@@ -587,10 +573,8 @@ static void denali_irq_init(struct denali_nand_info *denali)
587 int_mask = DENALI_IRQ_ALL; 573 int_mask = DENALI_IRQ_ALL;
588 574
589 /* Clear all status bits */ 575 /* Clear all status bits */
590 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS0); 576 for (i = 0; i < denali->max_banks; ++i)
591 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS1); 577 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
592 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS2);
593 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS3);
594 578
595 denali_irq_enable(denali, int_mask); 579 denali_irq_enable(denali, int_mask);
596} 580}
@@ -604,10 +588,10 @@ static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
604static void denali_irq_enable(struct denali_nand_info *denali, 588static void denali_irq_enable(struct denali_nand_info *denali,
605 uint32_t int_mask) 589 uint32_t int_mask)
606{ 590{
607 iowrite32(int_mask, denali->flash_reg + INTR_EN0); 591 int i;
608 iowrite32(int_mask, denali->flash_reg + INTR_EN1); 592
609 iowrite32(int_mask, denali->flash_reg + INTR_EN2); 593 for (i = 0; i < denali->max_banks; ++i)
610 iowrite32(int_mask, denali->flash_reg + INTR_EN3); 594 iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
611} 595}
612 596
613/* This function only returns when an interrupt that this driver cares about 597/* This function only returns when an interrupt that this driver cares about
@@ -624,7 +608,7 @@ static inline void clear_interrupt(struct denali_nand_info *denali,
624{ 608{
625 uint32_t intr_status_reg = 0; 609 uint32_t intr_status_reg = 0;
626 610
627 intr_status_reg = intr_status_addresses[denali->flash_bank]; 611 intr_status_reg = INTR_STATUS(denali->flash_bank);
628 612
629 iowrite32(irq_mask, denali->flash_reg + intr_status_reg); 613 iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
630} 614}
@@ -645,7 +629,7 @@ static uint32_t read_interrupt_status(struct denali_nand_info *denali)
645{ 629{
646 uint32_t intr_status_reg = 0; 630 uint32_t intr_status_reg = 0;
647 631
648 intr_status_reg = intr_status_addresses[denali->flash_bank]; 632 intr_status_reg = INTR_STATUS(denali->flash_bank);
649 633
650 return ioread32(denali->flash_reg + intr_status_reg); 634 return ioread32(denali->flash_reg + intr_status_reg);
651} 635}
@@ -754,7 +738,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
754 irq_mask = 0; 738 irq_mask = 0;
755 739
756 if (op == DENALI_READ) 740 if (op == DENALI_READ)
757 irq_mask = INTR_STATUS0__LOAD_COMP; 741 irq_mask = INTR_STATUS__LOAD_COMP;
758 else if (op == DENALI_WRITE) 742 else if (op == DENALI_WRITE)
759 irq_mask = 0; 743 irq_mask = 0;
760 else 744 else
@@ -800,7 +784,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
800 irq_status = wait_for_irq(denali, irq_mask); 784 irq_status = wait_for_irq(denali, irq_mask);
801 785
802 if (irq_status == 0) { 786 if (irq_status == 0) {
803 dev_err(&denali->dev->dev, 787 dev_err(denali->dev,
804 "cmd, page, addr on timeout " 788 "cmd, page, addr on timeout "
805 "(0x%x, 0x%x, 0x%x)\n", 789 "(0x%x, 0x%x, 0x%x)\n",
806 cmd, denali->page, addr); 790 cmd, denali->page, addr);
@@ -861,8 +845,8 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
861{ 845{
862 struct denali_nand_info *denali = mtd_to_denali(mtd); 846 struct denali_nand_info *denali = mtd_to_denali(mtd);
863 uint32_t irq_status = 0; 847 uint32_t irq_status = 0;
864 uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP | 848 uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
865 INTR_STATUS0__PROGRAM_FAIL; 849 INTR_STATUS__PROGRAM_FAIL;
866 int status = 0; 850 int status = 0;
867 851
868 denali->page = page; 852 denali->page = page;
@@ -875,11 +859,11 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
875 irq_status = wait_for_irq(denali, irq_mask); 859 irq_status = wait_for_irq(denali, irq_mask);
876 860
877 if (irq_status == 0) { 861 if (irq_status == 0) {
878 dev_err(&denali->dev->dev, "OOB write failed\n"); 862 dev_err(denali->dev, "OOB write failed\n");
879 status = -EIO; 863 status = -EIO;
880 } 864 }
881 } else { 865 } else {
882 dev_err(&denali->dev->dev, "unable to send pipeline command\n"); 866 dev_err(denali->dev, "unable to send pipeline command\n");
883 status = -EIO; 867 status = -EIO;
884 } 868 }
885 return status; 869 return status;
@@ -889,7 +873,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
889static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) 873static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
890{ 874{
891 struct denali_nand_info *denali = mtd_to_denali(mtd); 875 struct denali_nand_info *denali = mtd_to_denali(mtd);
892 uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, 876 uint32_t irq_mask = INTR_STATUS__LOAD_COMP,
893 irq_status = 0, addr = 0x0, cmd = 0x0; 877 irq_status = 0, addr = 0x0, cmd = 0x0;
894 878
895 denali->page = page; 879 denali->page = page;
@@ -904,7 +888,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
904 irq_status = wait_for_irq(denali, irq_mask); 888 irq_status = wait_for_irq(denali, irq_mask);
905 889
906 if (irq_status == 0) 890 if (irq_status == 0)
907 dev_err(&denali->dev->dev, "page on OOB timeout %d\n", 891 dev_err(denali->dev, "page on OOB timeout %d\n",
908 denali->page); 892 denali->page);
909 893
910 /* We set the device back to MAIN_ACCESS here as I observed 894 /* We set the device back to MAIN_ACCESS here as I observed
@@ -944,7 +928,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
944{ 928{
945 bool check_erased_page = false; 929 bool check_erased_page = false;
946 930
947 if (irq_status & INTR_STATUS0__ECC_ERR) { 931 if (irq_status & INTR_STATUS__ECC_ERR) {
948 /* read the ECC errors. we'll ignore them for now */ 932 /* read the ECC errors. we'll ignore them for now */
949 uint32_t err_address = 0, err_correction_info = 0; 933 uint32_t err_address = 0, err_correction_info = 0;
950 uint32_t err_byte = 0, err_sector = 0, err_device = 0; 934 uint32_t err_byte = 0, err_sector = 0, err_device = 0;
@@ -995,7 +979,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
995 * for a while for this interrupt 979 * for a while for this interrupt
996 * */ 980 * */
997 while (!(read_interrupt_status(denali) & 981 while (!(read_interrupt_status(denali) &
998 INTR_STATUS0__ECC_TRANSACTION_DONE)) 982 INTR_STATUS__ECC_TRANSACTION_DONE))
999 cpu_relax(); 983 cpu_relax();
1000 clear_interrupts(denali); 984 clear_interrupts(denali);
1001 denali_set_intr_modes(denali, true); 985 denali_set_intr_modes(denali, true);
@@ -1045,14 +1029,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1045 const uint8_t *buf, bool raw_xfer) 1029 const uint8_t *buf, bool raw_xfer)
1046{ 1030{
1047 struct denali_nand_info *denali = mtd_to_denali(mtd); 1031 struct denali_nand_info *denali = mtd_to_denali(mtd);
1048 struct pci_dev *pci_dev = denali->dev;
1049 1032
1050 dma_addr_t addr = denali->buf.dma_buf; 1033 dma_addr_t addr = denali->buf.dma_buf;
1051 size_t size = denali->mtd.writesize + denali->mtd.oobsize; 1034 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1052 1035
1053 uint32_t irq_status = 0; 1036 uint32_t irq_status = 0;
1054 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP | 1037 uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
1055 INTR_STATUS0__PROGRAM_FAIL; 1038 INTR_STATUS__PROGRAM_FAIL;
1056 1039
1057 /* if it is a raw xfer, we want to disable ecc, and send 1040 /* if it is a raw xfer, we want to disable ecc, and send
1058 * the spare area. 1041 * the spare area.
@@ -1071,7 +1054,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1071 mtd->oobsize); 1054 mtd->oobsize);
1072 } 1055 }
1073 1056
1074 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE); 1057 dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
1075 1058
1076 clear_interrupts(denali); 1059 clear_interrupts(denali);
1077 denali_enable_dma(denali, true); 1060 denali_enable_dma(denali, true);
@@ -1082,16 +1065,16 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1082 irq_status = wait_for_irq(denali, irq_mask); 1065 irq_status = wait_for_irq(denali, irq_mask);
1083 1066
1084 if (irq_status == 0) { 1067 if (irq_status == 0) {
1085 dev_err(&denali->dev->dev, 1068 dev_err(denali->dev,
1086 "timeout on write_page (type = %d)\n", 1069 "timeout on write_page (type = %d)\n",
1087 raw_xfer); 1070 raw_xfer);
1088 denali->status = 1071 denali->status =
1089 (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? 1072 (irq_status & INTR_STATUS__PROGRAM_FAIL) ?
1090 NAND_STATUS_FAIL : PASS; 1073 NAND_STATUS_FAIL : PASS;
1091 } 1074 }
1092 1075
1093 denali_enable_dma(denali, false); 1076 denali_enable_dma(denali, false);
1094 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE); 1077 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
1095} 1078}
1096 1079
1097/* NAND core entry points */ 1080/* NAND core entry points */
@@ -1139,18 +1122,17 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1139 uint8_t *buf, int page) 1122 uint8_t *buf, int page)
1140{ 1123{
1141 struct denali_nand_info *denali = mtd_to_denali(mtd); 1124 struct denali_nand_info *denali = mtd_to_denali(mtd);
1142 struct pci_dev *pci_dev = denali->dev;
1143 1125
1144 dma_addr_t addr = denali->buf.dma_buf; 1126 dma_addr_t addr = denali->buf.dma_buf;
1145 size_t size = denali->mtd.writesize + denali->mtd.oobsize; 1127 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1146 1128
1147 uint32_t irq_status = 0; 1129 uint32_t irq_status = 0;
1148 uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE | 1130 uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
1149 INTR_STATUS0__ECC_ERR; 1131 INTR_STATUS__ECC_ERR;
1150 bool check_erased_page = false; 1132 bool check_erased_page = false;
1151 1133
1152 if (page != denali->page) { 1134 if (page != denali->page) {
1153 dev_err(&denali->dev->dev, "IN %s: page %d is not" 1135 dev_err(denali->dev, "IN %s: page %d is not"
1154 " equal to denali->page %d, investigate!!", 1136 " equal to denali->page %d, investigate!!",
1155 __func__, page, denali->page); 1137 __func__, page, denali->page);
1156 BUG(); 1138 BUG();
@@ -1159,7 +1141,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1159 setup_ecc_for_xfer(denali, true, false); 1141 setup_ecc_for_xfer(denali, true, false);
1160 1142
1161 denali_enable_dma(denali, true); 1143 denali_enable_dma(denali, true);
1162 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1144 dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
1163 1145
1164 clear_interrupts(denali); 1146 clear_interrupts(denali);
1165 denali_setup_dma(denali, DENALI_READ); 1147 denali_setup_dma(denali, DENALI_READ);
@@ -1167,7 +1149,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1167 /* wait for operation to complete */ 1149 /* wait for operation to complete */
1168 irq_status = wait_for_irq(denali, irq_mask); 1150 irq_status = wait_for_irq(denali, irq_mask);
1169 1151
1170 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1152 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
1171 1153
1172 memcpy(buf, denali->buf.buf, mtd->writesize); 1154 memcpy(buf, denali->buf.buf, mtd->writesize);
1173 1155
@@ -1192,16 +1174,15 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1192 uint8_t *buf, int page) 1174 uint8_t *buf, int page)
1193{ 1175{
1194 struct denali_nand_info *denali = mtd_to_denali(mtd); 1176 struct denali_nand_info *denali = mtd_to_denali(mtd);
1195 struct pci_dev *pci_dev = denali->dev;
1196 1177
1197 dma_addr_t addr = denali->buf.dma_buf; 1178 dma_addr_t addr = denali->buf.dma_buf;
1198 size_t size = denali->mtd.writesize + denali->mtd.oobsize; 1179 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1199 1180
1200 uint32_t irq_status = 0; 1181 uint32_t irq_status = 0;
1201 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP; 1182 uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
1202 1183
1203 if (page != denali->page) { 1184 if (page != denali->page) {
1204 dev_err(&denali->dev->dev, "IN %s: page %d is not" 1185 dev_err(denali->dev, "IN %s: page %d is not"
1205 " equal to denali->page %d, investigate!!", 1186 " equal to denali->page %d, investigate!!",
1206 __func__, page, denali->page); 1187 __func__, page, denali->page);
1207 BUG(); 1188 BUG();
@@ -1210,7 +1191,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1210 setup_ecc_for_xfer(denali, false, true); 1191 setup_ecc_for_xfer(denali, false, true);
1211 denali_enable_dma(denali, true); 1192 denali_enable_dma(denali, true);
1212 1193
1213 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1194 dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
1214 1195
1215 clear_interrupts(denali); 1196 clear_interrupts(denali);
1216 denali_setup_dma(denali, DENALI_READ); 1197 denali_setup_dma(denali, DENALI_READ);
@@ -1218,7 +1199,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1218 /* wait for operation to complete */ 1199 /* wait for operation to complete */
1219 irq_status = wait_for_irq(denali, irq_mask); 1200 irq_status = wait_for_irq(denali, irq_mask);
1220 1201
1221 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1202 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
1222 1203
1223 denali_enable_dma(denali, false); 1204 denali_enable_dma(denali, false);
1224 1205
@@ -1271,10 +1252,10 @@ static void denali_erase(struct mtd_info *mtd, int page)
1271 index_addr(denali, (uint32_t)cmd, 0x1); 1252 index_addr(denali, (uint32_t)cmd, 0x1);
1272 1253
1273 /* wait for erase to complete or failure to occur */ 1254 /* wait for erase to complete or failure to occur */
1274 irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP | 1255 irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
1275 INTR_STATUS0__ERASE_FAIL); 1256 INTR_STATUS__ERASE_FAIL);
1276 1257
1277 denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? 1258 denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
1278 NAND_STATUS_FAIL : PASS; 1259 NAND_STATUS_FAIL : PASS;
1279} 1260}
1280 1261
@@ -1330,7 +1311,7 @@ static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
1330 uint8_t *ecc_code) 1311 uint8_t *ecc_code)
1331{ 1312{
1332 struct denali_nand_info *denali = mtd_to_denali(mtd); 1313 struct denali_nand_info *denali = mtd_to_denali(mtd);
1333 dev_err(&denali->dev->dev, 1314 dev_err(denali->dev,
1334 "denali_ecc_calculate called unexpectedly\n"); 1315 "denali_ecc_calculate called unexpectedly\n");
1335 BUG(); 1316 BUG();
1336 return -EIO; 1317 return -EIO;
@@ -1340,7 +1321,7 @@ static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
1340 uint8_t *read_ecc, uint8_t *calc_ecc) 1321 uint8_t *read_ecc, uint8_t *calc_ecc)
1341{ 1322{
1342 struct denali_nand_info *denali = mtd_to_denali(mtd); 1323 struct denali_nand_info *denali = mtd_to_denali(mtd);
1343 dev_err(&denali->dev->dev, 1324 dev_err(denali->dev,
1344 "denali_ecc_correct called unexpectedly\n"); 1325 "denali_ecc_correct called unexpectedly\n");
1345 BUG(); 1326 BUG();
1346 return -EIO; 1327 return -EIO;
@@ -1349,7 +1330,7 @@ static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
1349static void denali_ecc_hwctl(struct mtd_info *mtd, int mode) 1330static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
1350{ 1331{
1351 struct denali_nand_info *denali = mtd_to_denali(mtd); 1332 struct denali_nand_info *denali = mtd_to_denali(mtd);
1352 dev_err(&denali->dev->dev, 1333 dev_err(denali->dev,
1353 "denali_ecc_hwctl called unexpectedly\n"); 1334 "denali_ecc_hwctl called unexpectedly\n");
1354 BUG(); 1335 BUG();
1355} 1336}
@@ -1375,6 +1356,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
1375 /* Should set value for these registers when init */ 1356 /* Should set value for these registers when init */
1376 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); 1357 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1377 iowrite32(1, denali->flash_reg + ECC_ENABLE); 1358 iowrite32(1, denali->flash_reg + ECC_ENABLE);
1359 detect_max_banks(denali);
1378 denali_nand_timing_set(denali); 1360 denali_nand_timing_set(denali);
1379 denali_irq_init(denali); 1361 denali_irq_init(denali);
1380} 1362}
@@ -1484,24 +1466,22 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1484 } 1466 }
1485 1467
1486 /* Is 32-bit DMA supported? */ 1468 /* Is 32-bit DMA supported? */
1487 ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); 1469 ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
1488
1489 if (ret) { 1470 if (ret) {
1490 printk(KERN_ERR "Spectra: no usable DMA configuration\n"); 1471 printk(KERN_ERR "Spectra: no usable DMA configuration\n");
1491 goto failed_enable_dev; 1472 goto failed_enable_dev;
1492 } 1473 }
1493 denali->buf.dma_buf = 1474 denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
1494 pci_map_single(dev, denali->buf.buf, 1475 DENALI_BUF_SIZE,
1495 DENALI_BUF_SIZE, 1476 DMA_BIDIRECTIONAL);
1496 PCI_DMA_BIDIRECTIONAL);
1497 1477
1498 if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) { 1478 if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
1499 dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n"); 1479 dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
1500 goto failed_enable_dev; 1480 goto failed_enable_dev;
1501 } 1481 }
1502 1482
1503 pci_set_master(dev); 1483 pci_set_master(dev);
1504 denali->dev = dev; 1484 denali->dev = &dev->dev;
1505 denali->mtd.dev.parent = &dev->dev; 1485 denali->mtd.dev.parent = &dev->dev;
1506 1486
1507 ret = pci_request_regions(dev, DENALI_NAND_NAME); 1487 ret = pci_request_regions(dev, DENALI_NAND_NAME);
@@ -1554,7 +1534,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1554 /* scan for NAND devices attached to the controller 1534 /* scan for NAND devices attached to the controller
1555 * this is the first stage in a two step process to register 1535 * this is the first stage in a two step process to register
1556 * with the nand subsystem */ 1536 * with the nand subsystem */
1557 if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) { 1537 if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
1558 ret = -ENXIO; 1538 ret = -ENXIO;
1559 goto failed_req_irq; 1539 goto failed_req_irq;
1560 } 1540 }
@@ -1664,7 +1644,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1664 goto failed_req_irq; 1644 goto failed_req_irq;
1665 } 1645 }
1666 1646
1667 ret = add_mtd_device(&denali->mtd); 1647 ret = mtd_device_register(&denali->mtd, NULL, 0);
1668 if (ret) { 1648 if (ret) {
1669 dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n", 1649 dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
1670 ret); 1650 ret);
@@ -1681,8 +1661,8 @@ failed_remap_reg:
1681failed_req_regions: 1661failed_req_regions:
1682 pci_release_regions(dev); 1662 pci_release_regions(dev);
1683failed_dma_map: 1663failed_dma_map:
1684 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, 1664 dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
1685 PCI_DMA_BIDIRECTIONAL); 1665 DMA_BIDIRECTIONAL);
1686failed_enable_dev: 1666failed_enable_dev:
1687 pci_disable_device(dev); 1667 pci_disable_device(dev);
1688failed_alloc_memery: 1668failed_alloc_memery:
@@ -1696,7 +1676,7 @@ static void denali_pci_remove(struct pci_dev *dev)
1696 struct denali_nand_info *denali = pci_get_drvdata(dev); 1676 struct denali_nand_info *denali = pci_get_drvdata(dev);
1697 1677
1698 nand_release(&denali->mtd); 1678 nand_release(&denali->mtd);
1699 del_mtd_device(&denali->mtd); 1679 mtd_device_unregister(&denali->mtd);
1700 1680
1701 denali_irq_cleanup(dev->irq, denali); 1681 denali_irq_cleanup(dev->irq, denali);
1702 1682
@@ -1704,8 +1684,8 @@ static void denali_pci_remove(struct pci_dev *dev)
1704 iounmap(denali->flash_mem); 1684 iounmap(denali->flash_mem);
1705 pci_release_regions(dev); 1685 pci_release_regions(dev);
1706 pci_disable_device(dev); 1686 pci_disable_device(dev);
1707 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, 1687 dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
1708 PCI_DMA_BIDIRECTIONAL); 1688 DMA_BIDIRECTIONAL);
1709 pci_set_drvdata(dev, NULL); 1689 pci_set_drvdata(dev, NULL);
1710 kfree(denali); 1690 kfree(denali);
1711} 1691}
@@ -1721,8 +1701,7 @@ static struct pci_driver denali_pci_driver = {
1721 1701
1722static int __devinit denali_init(void) 1702static int __devinit denali_init(void)
1723{ 1703{
1724 printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", 1704 printk(KERN_INFO "Spectra MTD driver\n");
1725 __DATE__, __TIME__);
1726 return pci_register_driver(&denali_pci_driver); 1705 return pci_register_driver(&denali_pci_driver);
1727} 1706}
1728 1707
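
Besides the interrupt-register rework, the denali.c hunks above move the driver from the pci_* DMA wrappers to the generic DMA API, which takes a struct device * and is why denali->dev becomes &pci_dev->dev. A minimal sketch of that streaming-DMA pattern, with hypothetical names and buffer size, not code from this patch:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

#define EXAMPLE_BUF_SIZE 4096		/* stand-in for DENALI_BUF_SIZE */

static int example_dma_roundtrip(struct pci_dev *pdev, void *buf)
{
	struct device *dev = &pdev->dev;
	dma_addr_t handle;

	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;		/* no usable 32-bit DMA configuration */

	handle = dma_map_single(dev, buf, EXAMPLE_BUF_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* hand the buffer to the device before starting a transfer ... */
	dma_sync_single_for_device(dev, handle, EXAMPLE_BUF_SIZE, DMA_BIDIRECTIONAL);
	/* ... the hardware runs here ... */
	/* ... and take it back once the completion interrupt fires */
	dma_sync_single_for_cpu(dev, handle, EXAMPLE_BUF_SIZE, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, handle, EXAMPLE_BUF_SIZE, DMA_BIDIRECTIONAL);
	return 0;
}
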
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 3918bcb1561e..fabb9d56b39e 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -211,185 +211,46 @@
211#define TRANSFER_MODE 0x400 211#define TRANSFER_MODE 0x400
212#define TRANSFER_MODE__VALUE 0x0003 212#define TRANSFER_MODE__VALUE 0x0003
213 213
214#define INTR_STATUS0 0x410 214#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50))
215#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001 215#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50))
216#define INTR_STATUS0__ECC_ERR 0x0002 216
217#define INTR_STATUS0__DMA_CMD_COMP 0x0004 217#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001
218#define INTR_STATUS0__TIME_OUT 0x0008 218#define INTR_STATUS__ECC_ERR 0x0002
219#define INTR_STATUS0__PROGRAM_FAIL 0x0010 219#define INTR_STATUS__DMA_CMD_COMP 0x0004
220#define INTR_STATUS0__ERASE_FAIL 0x0020 220#define INTR_STATUS__TIME_OUT 0x0008
221#define INTR_STATUS0__LOAD_COMP 0x0040 221#define INTR_STATUS__PROGRAM_FAIL 0x0010
222#define INTR_STATUS0__PROGRAM_COMP 0x0080 222#define INTR_STATUS__ERASE_FAIL 0x0020
223#define INTR_STATUS0__ERASE_COMP 0x0100 223#define INTR_STATUS__LOAD_COMP 0x0040
224#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200 224#define INTR_STATUS__PROGRAM_COMP 0x0080
225#define INTR_STATUS0__LOCKED_BLK 0x0400 225#define INTR_STATUS__ERASE_COMP 0x0100
226#define INTR_STATUS0__UNSUP_CMD 0x0800 226#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200
227#define INTR_STATUS0__INT_ACT 0x1000 227#define INTR_STATUS__LOCKED_BLK 0x0400
228#define INTR_STATUS0__RST_COMP 0x2000 228#define INTR_STATUS__UNSUP_CMD 0x0800
229#define INTR_STATUS0__PIPE_CMD_ERR 0x4000 229#define INTR_STATUS__INT_ACT 0x1000
230#define INTR_STATUS0__PAGE_XFER_INC 0x8000 230#define INTR_STATUS__RST_COMP 0x2000
231 231#define INTR_STATUS__PIPE_CMD_ERR 0x4000
232#define INTR_EN0 0x420 232#define INTR_STATUS__PAGE_XFER_INC 0x8000
233#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001 233
234#define INTR_EN0__ECC_ERR 0x0002 234#define INTR_EN__ECC_TRANSACTION_DONE 0x0001
235#define INTR_EN0__DMA_CMD_COMP 0x0004 235#define INTR_EN__ECC_ERR 0x0002
236#define INTR_EN0__TIME_OUT 0x0008 236#define INTR_EN__DMA_CMD_COMP 0x0004
237#define INTR_EN0__PROGRAM_FAIL 0x0010 237#define INTR_EN__TIME_OUT 0x0008
238#define INTR_EN0__ERASE_FAIL 0x0020 238#define INTR_EN__PROGRAM_FAIL 0x0010
239#define INTR_EN0__LOAD_COMP 0x0040 239#define INTR_EN__ERASE_FAIL 0x0020
240#define INTR_EN0__PROGRAM_COMP 0x0080 240#define INTR_EN__LOAD_COMP 0x0040
241#define INTR_EN0__ERASE_COMP 0x0100 241#define INTR_EN__PROGRAM_COMP 0x0080
242#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200 242#define INTR_EN__ERASE_COMP 0x0100
243#define INTR_EN0__LOCKED_BLK 0x0400 243#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200
244#define INTR_EN0__UNSUP_CMD 0x0800 244#define INTR_EN__LOCKED_BLK 0x0400
245#define INTR_EN0__INT_ACT 0x1000 245#define INTR_EN__UNSUP_CMD 0x0800
246#define INTR_EN0__RST_COMP 0x2000 246#define INTR_EN__INT_ACT 0x1000
247#define INTR_EN0__PIPE_CMD_ERR 0x4000 247#define INTR_EN__RST_COMP 0x2000
248#define INTR_EN0__PAGE_XFER_INC 0x8000 248#define INTR_EN__PIPE_CMD_ERR 0x4000
249 249#define INTR_EN__PAGE_XFER_INC 0x8000
250#define PAGE_CNT0 0x430 250
251#define PAGE_CNT0__VALUE 0x00ff 251#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50))
252 252#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50))
253#define ERR_PAGE_ADDR0 0x440 253#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50))
254#define ERR_PAGE_ADDR0__VALUE 0xffff
255
256#define ERR_BLOCK_ADDR0 0x450
257#define ERR_BLOCK_ADDR0__VALUE 0xffff
258
259#define INTR_STATUS1 0x460
260#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
261#define INTR_STATUS1__ECC_ERR 0x0002
262#define INTR_STATUS1__DMA_CMD_COMP 0x0004
263#define INTR_STATUS1__TIME_OUT 0x0008
264#define INTR_STATUS1__PROGRAM_FAIL 0x0010
265#define INTR_STATUS1__ERASE_FAIL 0x0020
266#define INTR_STATUS1__LOAD_COMP 0x0040
267#define INTR_STATUS1__PROGRAM_COMP 0x0080
268#define INTR_STATUS1__ERASE_COMP 0x0100
269#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
270#define INTR_STATUS1__LOCKED_BLK 0x0400
271#define INTR_STATUS1__UNSUP_CMD 0x0800
272#define INTR_STATUS1__INT_ACT 0x1000
273#define INTR_STATUS1__RST_COMP 0x2000
274#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
275#define INTR_STATUS1__PAGE_XFER_INC 0x8000
276
277#define INTR_EN1 0x470
278#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
279#define INTR_EN1__ECC_ERR 0x0002
280#define INTR_EN1__DMA_CMD_COMP 0x0004
281#define INTR_EN1__TIME_OUT 0x0008
282#define INTR_EN1__PROGRAM_FAIL 0x0010
283#define INTR_EN1__ERASE_FAIL 0x0020
284#define INTR_EN1__LOAD_COMP 0x0040
285#define INTR_EN1__PROGRAM_COMP 0x0080
286#define INTR_EN1__ERASE_COMP 0x0100
287#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
288#define INTR_EN1__LOCKED_BLK 0x0400
289#define INTR_EN1__UNSUP_CMD 0x0800
290#define INTR_EN1__INT_ACT 0x1000
291#define INTR_EN1__RST_COMP 0x2000
292#define INTR_EN1__PIPE_CMD_ERR 0x4000
293#define INTR_EN1__PAGE_XFER_INC 0x8000
294
295#define PAGE_CNT1 0x480
296#define PAGE_CNT1__VALUE 0x00ff
297
298#define ERR_PAGE_ADDR1 0x490
299#define ERR_PAGE_ADDR1__VALUE 0xffff
300
301#define ERR_BLOCK_ADDR1 0x4a0
302#define ERR_BLOCK_ADDR1__VALUE 0xffff
303
304#define INTR_STATUS2 0x4b0
305#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
306#define INTR_STATUS2__ECC_ERR 0x0002
307#define INTR_STATUS2__DMA_CMD_COMP 0x0004
308#define INTR_STATUS2__TIME_OUT 0x0008
309#define INTR_STATUS2__PROGRAM_FAIL 0x0010
310#define INTR_STATUS2__ERASE_FAIL 0x0020
311#define INTR_STATUS2__LOAD_COMP 0x0040
312#define INTR_STATUS2__PROGRAM_COMP 0x0080
313#define INTR_STATUS2__ERASE_COMP 0x0100
314#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
315#define INTR_STATUS2__LOCKED_BLK 0x0400
316#define INTR_STATUS2__UNSUP_CMD 0x0800
317#define INTR_STATUS2__INT_ACT 0x1000
318#define INTR_STATUS2__RST_COMP 0x2000
319#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
320#define INTR_STATUS2__PAGE_XFER_INC 0x8000
321
322#define INTR_EN2 0x4c0
323#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
324#define INTR_EN2__ECC_ERR 0x0002
325#define INTR_EN2__DMA_CMD_COMP 0x0004
326#define INTR_EN2__TIME_OUT 0x0008
327#define INTR_EN2__PROGRAM_FAIL 0x0010
328#define INTR_EN2__ERASE_FAIL 0x0020
329#define INTR_EN2__LOAD_COMP 0x0040
330#define INTR_EN2__PROGRAM_COMP 0x0080
331#define INTR_EN2__ERASE_COMP 0x0100
332#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
333#define INTR_EN2__LOCKED_BLK 0x0400
334#define INTR_EN2__UNSUP_CMD 0x0800
335#define INTR_EN2__INT_ACT 0x1000
336#define INTR_EN2__RST_COMP 0x2000
337#define INTR_EN2__PIPE_CMD_ERR 0x4000
338#define INTR_EN2__PAGE_XFER_INC 0x8000
339
340#define PAGE_CNT2 0x4d0
341#define PAGE_CNT2__VALUE 0x00ff
342
343#define ERR_PAGE_ADDR2 0x4e0
344#define ERR_PAGE_ADDR2__VALUE 0xffff
345
346#define ERR_BLOCK_ADDR2 0x4f0
347#define ERR_BLOCK_ADDR2__VALUE 0xffff
348
349#define INTR_STATUS3 0x500
350#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
351#define INTR_STATUS3__ECC_ERR 0x0002
352#define INTR_STATUS3__DMA_CMD_COMP 0x0004
353#define INTR_STATUS3__TIME_OUT 0x0008
354#define INTR_STATUS3__PROGRAM_FAIL 0x0010
355#define INTR_STATUS3__ERASE_FAIL 0x0020
356#define INTR_STATUS3__LOAD_COMP 0x0040
357#define INTR_STATUS3__PROGRAM_COMP 0x0080
358#define INTR_STATUS3__ERASE_COMP 0x0100
359#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
360#define INTR_STATUS3__LOCKED_BLK 0x0400
361#define INTR_STATUS3__UNSUP_CMD 0x0800
362#define INTR_STATUS3__INT_ACT 0x1000
363#define INTR_STATUS3__RST_COMP 0x2000
364#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
365#define INTR_STATUS3__PAGE_XFER_INC 0x8000
366
367#define INTR_EN3 0x510
368#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
369#define INTR_EN3__ECC_ERR 0x0002
370#define INTR_EN3__DMA_CMD_COMP 0x0004
371#define INTR_EN3__TIME_OUT 0x0008
372#define INTR_EN3__PROGRAM_FAIL 0x0010
373#define INTR_EN3__ERASE_FAIL 0x0020
374#define INTR_EN3__LOAD_COMP 0x0040
375#define INTR_EN3__PROGRAM_COMP 0x0080
376#define INTR_EN3__ERASE_COMP 0x0100
377#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
378#define INTR_EN3__LOCKED_BLK 0x0400
379#define INTR_EN3__UNSUP_CMD 0x0800
380#define INTR_EN3__INT_ACT 0x1000
381#define INTR_EN3__RST_COMP 0x2000
382#define INTR_EN3__PIPE_CMD_ERR 0x4000
383#define INTR_EN3__PAGE_XFER_INC 0x8000
384
385#define PAGE_CNT3 0x520
386#define PAGE_CNT3__VALUE 0x00ff
387
388#define ERR_PAGE_ADDR3 0x530
389#define ERR_PAGE_ADDR3__VALUE 0xffff
390
391#define ERR_BLOCK_ADDR3 0x540
392#define ERR_BLOCK_ADDR3__VALUE 0xffff
393 254
394#define DATA_INTR 0x550 255#define DATA_INTR 0x550
395#define DATA_INTR__WRITE_SPACE_AV 0x0001 256#define DATA_INTR__WRITE_SPACE_AV 0x0001
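
A note on the hunk above (an editorial check, not part of the patch): the numbered interrupt blocks could be deleted because the controller places one register block per bank at a fixed 0x50 stride, so the parameterised macros reproduce every constant that was removed:

	INTR_STATUS(b) = 0x410 + b * 0x50  ->  0x410, 0x460, 0x4b0, 0x500   (old INTR_STATUS0..3)
	INTR_EN(b)     = 0x420 + b * 0x50  ->  0x420, 0x470, 0x4c0, 0x510   (old INTR_EN0..3)
	PAGE_CNT(b)    = 0x430 + b * 0x50  ->  0x430, 0x480, 0x4d0, 0x520   (old PAGE_CNT0..3)

The bank index now runs up to denali->max_banks, which detect_max_banks() in denali.c derives as 2 << (FEATURES & FEATURES__N_BANKS); a field value of 1, for example, gives the four banks that the old fixed lookup tables covered.
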
@@ -484,141 +345,23 @@
484#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010 345#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
485#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020 346#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
486 347
487#define PERM_SRC_ID_0 0x830
488#define PERM_SRC_ID_0__SRCID 0x00ff
489#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
490#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
491#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
492#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
493
494#define MIN_BLK_ADDR_0 0x840
495#define MIN_BLK_ADDR_0__VALUE 0xffff
496
497#define MAX_BLK_ADDR_0 0x850
498#define MAX_BLK_ADDR_0__VALUE 0xffff
499
500#define MIN_MAX_BANK_0 0x860
501#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
502#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
348#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40))
349#define PERM_SRC_ID__SRCID 0x00ff
350#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800
351#define PERM_SRC_ID__WRITE_ACTIVE 0x2000
352#define PERM_SRC_ID__READ_ACTIVE 0x4000
353#define PERM_SRC_ID__PARTITION_VALID 0x8000
354
355#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40))
356#define MIN_BLK_ADDR__VALUE 0xffff
357
358#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40))
359#define MAX_BLK_ADDR__VALUE 0xffff
360
361#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40))
362#define MIN_MAX_BANK__MIN_VALUE 0x0003
363#define MIN_MAX_BANK__MAX_VALUE 0x000c
503
504#define PERM_SRC_ID_1 0x870
505#define PERM_SRC_ID_1__SRCID 0x00ff
506#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
507#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
508#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
509#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
510
511#define MIN_BLK_ADDR_1 0x880
512#define MIN_BLK_ADDR_1__VALUE 0xffff
513
514#define MAX_BLK_ADDR_1 0x890
515#define MAX_BLK_ADDR_1__VALUE 0xffff
516
517#define MIN_MAX_BANK_1 0x8a0
518#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
519#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
520
521#define PERM_SRC_ID_2 0x8b0
522#define PERM_SRC_ID_2__SRCID 0x00ff
523#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
524#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
525#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
526#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
527
528#define MIN_BLK_ADDR_2 0x8c0
529#define MIN_BLK_ADDR_2__VALUE 0xffff
530
531#define MAX_BLK_ADDR_2 0x8d0
532#define MAX_BLK_ADDR_2__VALUE 0xffff
533
534#define MIN_MAX_BANK_2 0x8e0
535#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
536#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
537
538#define PERM_SRC_ID_3 0x8f0
539#define PERM_SRC_ID_3__SRCID 0x00ff
540#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
541#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
542#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
543#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
544
545#define MIN_BLK_ADDR_3 0x900
546#define MIN_BLK_ADDR_3__VALUE 0xffff
547
548#define MAX_BLK_ADDR_3 0x910
549#define MAX_BLK_ADDR_3__VALUE 0xffff
550
551#define MIN_MAX_BANK_3 0x920
552#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
553#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
554
555#define PERM_SRC_ID_4 0x930
556#define PERM_SRC_ID_4__SRCID 0x00ff
557#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
558#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
559#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
560#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
561
562#define MIN_BLK_ADDR_4 0x940
563#define MIN_BLK_ADDR_4__VALUE 0xffff
564
565#define MAX_BLK_ADDR_4 0x950
566#define MAX_BLK_ADDR_4__VALUE 0xffff
567
568#define MIN_MAX_BANK_4 0x960
569#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
570#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
571
572#define PERM_SRC_ID_5 0x970
573#define PERM_SRC_ID_5__SRCID 0x00ff
574#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
575#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
576#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
577#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
578
579#define MIN_BLK_ADDR_5 0x980
580#define MIN_BLK_ADDR_5__VALUE 0xffff
581
582#define MAX_BLK_ADDR_5 0x990
583#define MAX_BLK_ADDR_5__VALUE 0xffff
584
585#define MIN_MAX_BANK_5 0x9a0
586#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
587#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
588
589#define PERM_SRC_ID_6 0x9b0
590#define PERM_SRC_ID_6__SRCID 0x00ff
591#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
592#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
593#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
594#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
595
596#define MIN_BLK_ADDR_6 0x9c0
597#define MIN_BLK_ADDR_6__VALUE 0xffff
598
599#define MAX_BLK_ADDR_6 0x9d0
600#define MAX_BLK_ADDR_6__VALUE 0xffff
601
602#define MIN_MAX_BANK_6 0x9e0
603#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
604#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
605
606#define PERM_SRC_ID_7 0x9f0
607#define PERM_SRC_ID_7__SRCID 0x00ff
608#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
609#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
610#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
611#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
612 364
613#define MIN_BLK_ADDR_7 0xa00
614#define MIN_BLK_ADDR_7__VALUE 0xffff
615
616#define MAX_BLK_ADDR_7 0xa10
617#define MAX_BLK_ADDR_7__VALUE 0xffff
618
619#define MIN_MAX_BANK_7 0xa20
620#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
621#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
622 365
623/* ffsdefs.h */ 366/* ffsdefs.h */
624#define CLEAR 0 /*use this to clear a field instead of "fail"*/ 367#define CLEAR 0 /*use this to clear a field instead of "fail"*/
@@ -711,7 +454,6 @@
711#define READ_WRITE_ENABLE_HIGH_COUNT 22 454#define READ_WRITE_ENABLE_HIGH_COUNT 22
712 455
713#define ECC_SECTOR_SIZE 512 456#define ECC_SECTOR_SIZE 512
714#define LLD_MAX_FLASH_BANKS 4
715 457
716#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) 458#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
717 459
@@ -732,7 +474,7 @@ struct denali_nand_info {
732 int status; 474 int status;
733 int platform; 475 int platform;
734 struct nand_buf buf; 476 struct nand_buf buf;
735 struct pci_dev *dev; 477 struct device *dev;
736 int total_used_banks; 478 int total_used_banks;
737 uint32_t block; /* stored for future use */ 479 uint32_t block; /* stored for future use */
738 uint16_t page; 480 uint16_t page;
@@ -751,6 +493,7 @@ struct denali_nand_info {
751 uint32_t totalblks; 493 uint32_t totalblks;
752 uint32_t blksperchip; 494 uint32_t blksperchip;
753 uint32_t bbtskipbytes; 495 uint32_t bbtskipbytes;
496 uint32_t max_banks;
754}; 497};
755 498
756#endif /*_LLD_NAND_*/ 499#endif /*_LLD_NAND_*/
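
The denali.h hunks above replace the eight per-bank register blocks with macros that take the bank number; each bank's partition-control block sits 0x40 beyond the previous one. A minimal sketch, using only values visible in this diff, of how the parameterized forms reproduce the deleted constants:

    /* Sketch: the new bank-parameterized offsets versus the removed fixed ones. */
    #define PERM_SRC_ID(__bank)   (0x830 + ((__bank) * 0x40))
    #define MIN_BLK_ADDR(__bank)  (0x840 + ((__bank) * 0x40))
    #define MAX_BLK_ADDR(__bank)  (0x850 + ((__bank) * 0x40))
    #define MIN_MAX_BANK(__bank)  (0x860 + ((__bank) * 0x40))
    /*
     * PERM_SRC_ID(1)  == 0x870  (was PERM_SRC_ID_1)
     * MIN_BLK_ADDR(3) == 0x900  (was MIN_BLK_ADDR_3)
     * MIN_MAX_BANK(7) == 0xa20  (was MIN_MAX_BANK_7)
     */
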
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 657b9f4b6f9b..7837728d02ff 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1360,11 +1360,9 @@ static int __init nftl_scan_bbt(struct mtd_info *mtd)
1360 At least as nand_bbt.c is currently written. */ 1360 At least as nand_bbt.c is currently written. */
1361 if ((ret = nand_scan_bbt(mtd, NULL))) 1361 if ((ret = nand_scan_bbt(mtd, NULL)))
1362 return ret; 1362 return ret;
1363 add_mtd_device(mtd); 1363 mtd_device_register(mtd, NULL, 0);
1364#ifdef CONFIG_MTD_PARTITIONS
1365 if (!no_autopart) 1364 if (!no_autopart)
1366 add_mtd_partitions(mtd, parts, numparts); 1365 mtd_device_register(mtd, parts, numparts);
1367#endif
1368 return 0; 1366 return 0;
1369} 1367}
1370 1368
@@ -1419,11 +1417,9 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
1419 autopartitioning, but I want to give it more thought. */ 1417 autopartitioning, but I want to give it more thought. */
1420 if (!numparts) 1418 if (!numparts)
1421 return -EIO; 1419 return -EIO;
1422 add_mtd_device(mtd); 1420 mtd_device_register(mtd, NULL, 0);
1423#ifdef CONFIG_MTD_PARTITIONS
1424 if (!no_autopart) 1421 if (!no_autopart)
1425 add_mtd_partitions(mtd, parts, numparts); 1422 mtd_device_register(mtd, parts, numparts);
1426#endif
1427 return 0; 1423 return 0;
1428} 1424}
1429 1425
@@ -1678,9 +1674,9 @@ static int __init doc_probe(unsigned long physadr)
1678 /* DBB note: i believe nand_release is necessary here, as 1674 /* DBB note: i believe nand_release is necessary here, as
1679 buffers may have been allocated in nand_base. Check with 1675 buffers may have been allocated in nand_base. Check with
1680 Thomas. FIX ME! */ 1676 Thomas. FIX ME! */
1681 /* nand_release will call del_mtd_device, but we haven't yet 1677 /* nand_release will call mtd_device_unregister, but we
1682 added it. This is handled without incident by 1678 haven't yet added it. This is handled without incident by
1683 del_mtd_device, as far as I can tell. */ 1679 mtd_device_unregister, as far as I can tell. */
1684 nand_release(mtd); 1680 nand_release(mtd);
1685 kfree(mtd); 1681 kfree(mtd);
1686 goto fail; 1682 goto fail;
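
The diskonchip change above is the pattern repeated throughout this series: the CONFIG_MTD_PARTITIONS-conditional pair of add_mtd_device() and add_mtd_partitions() collapses into one unconditional mtd_device_register() call, which (as the NULL/0 case here suggests) registers the bare device when no partition table is supplied. A hedged before/after sketch; my_mtd, my_parts and my_nr_parts are placeholder names, not from the patch:

    /* Before: two APIs, chosen at compile time and at run time. */
    #ifdef CONFIG_MTD_PARTITIONS
    	if (my_nr_parts > 0)
    		err = add_mtd_partitions(my_mtd, my_parts, my_nr_parts);
    	else
    #endif
    		err = add_mtd_device(my_mtd);

    /* After: one call covers both cases. */
    	err = mtd_device_register(my_mtd, my_parts, my_nr_parts);
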
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index 86366bfba9f8..8400d0f6dada 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -55,7 +55,6 @@ static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
55static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR; 55static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
56static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR; 56static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
57 57
58#ifdef CONFIG_MTD_PARTITIONS
59/* 58/*
60 * Define static partitions for flash device 59 * Define static partitions for flash device
61 */ 60 */
@@ -67,8 +66,6 @@ static struct mtd_partition partition_info[] = {
67 66
68#define NUM_PARTITIONS 1 67#define NUM_PARTITIONS 1
69 68
70#endif
71
72/* 69/*
73 * hardware specific access to control-lines 70 * hardware specific access to control-lines
74 * 71 *
@@ -101,9 +98,7 @@ static int ep7312_device_ready(struct mtd_info *mtd)
101 return 1; 98 return 1;
102} 99}
103 100
104#ifdef CONFIG_MTD_PARTITIONS
105const char *part_probes[] = { "cmdlinepart", NULL }; 101const char *part_probes[] = { "cmdlinepart", NULL };
106#endif
107 102
108/* 103/*
109 * Main initialization routine 104 * Main initialization routine
@@ -162,14 +157,12 @@ static int __init ep7312_init(void)
162 kfree(ep7312_mtd); 157 kfree(ep7312_mtd);
163 return -ENXIO; 158 return -ENXIO;
164 } 159 }
165#ifdef CONFIG_MTD_PARTITIONS
166 ep7312_mtd->name = "edb7312-nand"; 160 ep7312_mtd->name = "edb7312-nand";
167 mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0); 161 mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
168 if (mtd_parts_nb > 0) 162 if (mtd_parts_nb > 0)
169 part_type = "command line"; 163 part_type = "command line";
170 else 164 else
171 mtd_parts_nb = 0; 165 mtd_parts_nb = 0;
172#endif
173 if (mtd_parts_nb == 0) { 166 if (mtd_parts_nb == 0) {
174 mtd_parts = partition_info; 167 mtd_parts = partition_info;
175 mtd_parts_nb = NUM_PARTITIONS; 168 mtd_parts_nb = NUM_PARTITIONS;
@@ -178,7 +171,7 @@ static int __init ep7312_init(void)
178 171
179 /* Register the partitions */ 172 /* Register the partitions */
180 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 173 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
181 add_mtd_partitions(ep7312_mtd, mtd_parts, mtd_parts_nb); 174 mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
182 175
183 /* Return happy */ 176 /* Return happy */
184 return 0; 177 return 0;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 537e380b8dcb..0bb254c7d2b1 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -841,12 +841,9 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
841 struct fsl_elbc_mtd *priv; 841 struct fsl_elbc_mtd *priv;
842 struct resource res; 842 struct resource res;
843 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl; 843 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
844
845#ifdef CONFIG_MTD_PARTITIONS
846 static const char *part_probe_types[] 844 static const char *part_probe_types[]
847 = { "cmdlinepart", "RedBoot", NULL }; 845 = { "cmdlinepart", "RedBoot", NULL };
848 struct mtd_partition *parts; 846 struct mtd_partition *parts;
849#endif
850 int ret; 847 int ret;
851 int bank; 848 int bank;
852 struct device *dev; 849 struct device *dev;
@@ -935,26 +932,19 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
935 if (ret) 932 if (ret)
936 goto err; 933 goto err;
937 934
938#ifdef CONFIG_MTD_PARTITIONS
939 /* First look for RedBoot table or partitions on the command 935 /* First look for RedBoot table or partitions on the command
940 * line, these take precedence over device tree information */ 936 * line, these take precedence over device tree information */
941 ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0); 937 ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
942 if (ret < 0) 938 if (ret < 0)
943 goto err; 939 goto err;
944 940
945#ifdef CONFIG_MTD_OF_PARTS
946 if (ret == 0) { 941 if (ret == 0) {
947 ret = of_mtd_parse_partitions(priv->dev, node, &parts); 942 ret = of_mtd_parse_partitions(priv->dev, node, &parts);
948 if (ret < 0) 943 if (ret < 0)
949 goto err; 944 goto err;
950 } 945 }
951#endif
952 946
953 if (ret > 0)
947 mtd_device_register(&priv->mtd, parts, ret);
954 add_mtd_partitions(&priv->mtd, parts, ret);
955 else
956#endif
957 add_mtd_device(&priv->mtd);
958 948
959 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n", 949 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
960 (unsigned long long)res.start, priv->bank); 950 (unsigned long long)res.start, priv->bank);
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 073ee026a17c..23752fd5bc59 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -33,10 +33,7 @@ struct fsl_upm_nand {
33 struct mtd_info mtd; 33 struct mtd_info mtd;
34 struct nand_chip chip; 34 struct nand_chip chip;
35 int last_ctrl; 35 int last_ctrl;
36#ifdef CONFIG_MTD_PARTITIONS
37 struct mtd_partition *parts; 36 struct mtd_partition *parts;
38#endif
39
40 struct fsl_upm upm; 37 struct fsl_upm upm;
41 uint8_t upm_addr_offset; 38 uint8_t upm_addr_offset;
42 uint8_t upm_cmd_offset; 39 uint8_t upm_cmd_offset;
@@ -161,9 +158,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
161{ 158{
162 int ret; 159 int ret;
163 struct device_node *flash_np; 160 struct device_node *flash_np;
164#ifdef CONFIG_MTD_PARTITIONS
165 static const char *part_types[] = { "cmdlinepart", NULL, }; 161 static const char *part_types[] = { "cmdlinepart", NULL, };
166#endif
167 162
168 fun->chip.IO_ADDR_R = fun->io_base; 163 fun->chip.IO_ADDR_R = fun->io_base;
169 fun->chip.IO_ADDR_W = fun->io_base; 164 fun->chip.IO_ADDR_W = fun->io_base;
@@ -197,7 +192,6 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
197 if (ret) 192 if (ret)
198 goto err; 193 goto err;
199 194
200#ifdef CONFIG_MTD_PARTITIONS
201 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0); 195 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
202 196
203#ifdef CONFIG_MTD_OF_PARTS 197#ifdef CONFIG_MTD_OF_PARTS
@@ -207,11 +201,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
207 goto err; 201 goto err;
208 } 202 }
209#endif 203#endif
210 if (ret > 0) 204 ret = mtd_device_register(&fun->mtd, fun->parts, ret);
211 ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
212 else
213#endif
214 ret = add_mtd_device(&fun->mtd);
215err: 205err:
216 of_node_put(flash_np); 206 of_node_put(flash_np);
217 return ret; 207 return ret;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 0d45ef3883e8..e9b275ac381c 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -120,8 +120,6 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
120 } 120 }
121}; 121};
122 122
123
124#ifdef CONFIG_MTD_PARTITIONS
125/* 123/*
126 * Default partition tables to be used if the partition information not 124 * Default partition tables to be used if the partition information not
127 * provided through platform data. 125 * provided through platform data.
@@ -182,7 +180,6 @@ static struct mtd_partition partition_info_128KB_blk[] = {
182#ifdef CONFIG_MTD_CMDLINE_PARTS 180#ifdef CONFIG_MTD_CMDLINE_PARTS
183const char *part_probes[] = { "cmdlinepart", NULL }; 181const char *part_probes[] = { "cmdlinepart", NULL };
184#endif 182#endif
185#endif
186 183
187/** 184/**
188 * struct fsmc_nand_data - structure for FSMC NAND device state 185 * struct fsmc_nand_data - structure for FSMC NAND device state
@@ -719,7 +716,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
719 * platform data, 716 * platform data,
720 * default partition information present in driver. 717 * default partition information present in driver.
721 */ 718 */
722#ifdef CONFIG_MTD_PARTITIONS
723#ifdef CONFIG_MTD_CMDLINE_PARTS 719#ifdef CONFIG_MTD_CMDLINE_PARTS
724 /* 720 /*
725 * Check if partition info passed via command line 721 * Check if partition info passed via command line
@@ -777,19 +773,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
777 } 773 }
778#endif 774#endif
779 775
780 if (host->partitions) { 776 ret = mtd_device_register(&host->mtd, host->partitions,
781 ret = add_mtd_partitions(&host->mtd, host->partitions, 777 host->nr_partitions);
782 host->nr_partitions); 778 if (ret)
783 if (ret)
784 goto err_probe;
785 }
786#else
787 dev_info(&pdev->dev, "Registering %s as whole device\n", mtd->name);
788 if (!add_mtd_device(mtd)) {
789 ret = -ENXIO;
790 goto err_probe; 779 goto err_probe;
791 }
792#endif
793 780
794 platform_set_drvdata(pdev, host); 781 platform_set_drvdata(pdev, host);
795 dev_info(&pdev->dev, "FSMC NAND driver registration successful\n"); 782 dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
@@ -835,11 +822,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
835 platform_set_drvdata(pdev, NULL); 822 platform_set_drvdata(pdev, NULL);
836 823
837 if (host) { 824 if (host) {
838#ifdef CONFIG_MTD_PARTITIONS 825 mtd_device_unregister(&host->mtd);
839 del_mtd_partitions(&host->mtd);
840#else
841 del_mtd_device(&host->mtd);
842#endif
843 clk_disable(host->clk); 826 clk_disable(host->clk);
844 clk_put(host->clk); 827 clk_put(host->clk);
845 828
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 0cde618bcc1e..2c2060b2800e 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -316,8 +316,8 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
316 gpiomtd->plat.adjust_parts(&gpiomtd->plat, 316 gpiomtd->plat.adjust_parts(&gpiomtd->plat,
317 gpiomtd->mtd_info.size); 317 gpiomtd->mtd_info.size);
318 318
319 add_mtd_partitions(&gpiomtd->mtd_info, gpiomtd->plat.parts, 319 mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
320 gpiomtd->plat.num_parts); 320 gpiomtd->plat.num_parts);
321 platform_set_drvdata(dev, gpiomtd); 321 platform_set_drvdata(dev, gpiomtd);
322 322
323 return 0; 323 return 0;
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index f8ce79b446ed..02a03e67109c 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -38,7 +38,6 @@ static struct mtd_info *h1910_nand_mtd = NULL;
38 * Module stuff 38 * Module stuff
39 */ 39 */
40 40
41#ifdef CONFIG_MTD_PARTITIONS
42/* 41/*
43 * Define static partitions for flash device 42 * Define static partitions for flash device
44 */ 43 */
@@ -50,8 +49,6 @@ static struct mtd_partition partition_info[] = {
50 49
51#define NUM_PARTITIONS 1 50#define NUM_PARTITIONS 1
52 51
53#endif
54
55/* 52/*
56 * hardware specific access to control-lines 53 * hardware specific access to control-lines
57 * 54 *
@@ -154,7 +151,7 @@ static int __init h1910_init(void)
154 151
155 /* Register the partitions */ 152 /* Register the partitions */
156 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 153 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
157 add_mtd_partitions(h1910_nand_mtd, mtd_parts, mtd_parts_nb); 154 mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
158 155
159 /* Return happy */ 156 /* Return happy */
160 return 0; 157 return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index cea38a5d4ac5..6e813daed068 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -299,10 +299,8 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
299 struct nand_chip *chip; 299 struct nand_chip *chip;
300 struct mtd_info *mtd; 300 struct mtd_info *mtd;
301 struct jz_nand_platform_data *pdata = pdev->dev.platform_data; 301 struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
302#ifdef CONFIG_MTD_PARTITIONS
303 struct mtd_partition *partition_info; 302 struct mtd_partition *partition_info;
304 int num_partitions = 0; 303 int num_partitions = 0;
305#endif
306 304
307 nand = kzalloc(sizeof(*nand), GFP_KERNEL); 305 nand = kzalloc(sizeof(*nand), GFP_KERNEL);
308 if (!nand) { 306 if (!nand) {
@@ -375,7 +373,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
375 goto err_gpio_free; 373 goto err_gpio_free;
376 } 374 }
377 375
378#ifdef CONFIG_MTD_PARTITIONS
379#ifdef CONFIG_MTD_CMDLINE_PARTS 376#ifdef CONFIG_MTD_CMDLINE_PARTS
380 num_partitions = parse_mtd_partitions(mtd, part_probes, 377 num_partitions = parse_mtd_partitions(mtd, part_probes,
381 &partition_info, 0); 378 &partition_info, 0);
@@ -384,12 +381,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
384 num_partitions = pdata->num_partitions; 381 num_partitions = pdata->num_partitions;
385 partition_info = pdata->partitions; 382 partition_info = pdata->partitions;
386 } 383 }
387 384 ret = mtd_device_register(mtd, partition_info, num_partitions);
388 if (num_partitions > 0)
389 ret = add_mtd_partitions(mtd, partition_info, num_partitions);
390 else
391#endif
392 ret = add_mtd_device(mtd);
393 385
394 if (ret) { 386 if (ret) {
395 dev_err(&pdev->dev, "Failed to add mtd device\n"); 387 dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 0b81b5b499d1..2f7c930872f9 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -131,9 +131,7 @@ struct mpc5121_nfc_prv {
131 131
132static void mpc5121_nfc_done(struct mtd_info *mtd); 132static void mpc5121_nfc_done(struct mtd_info *mtd);
133 133
134#ifdef CONFIG_MTD_PARTITIONS
135static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL }; 134static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
136#endif
137 135
138/* Read NFC register */ 136/* Read NFC register */
139static inline u16 nfc_read(struct mtd_info *mtd, uint reg) 137static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
@@ -658,9 +656,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
658 struct mpc5121_nfc_prv *prv; 656 struct mpc5121_nfc_prv *prv;
659 struct resource res; 657 struct resource res;
660 struct mtd_info *mtd; 658 struct mtd_info *mtd;
661#ifdef CONFIG_MTD_PARTITIONS
662 struct mtd_partition *parts; 659 struct mtd_partition *parts;
663#endif
664 struct nand_chip *chip; 660 struct nand_chip *chip;
665 unsigned long regs_paddr, regs_size; 661 unsigned long regs_paddr, regs_size;
666 const __be32 *chips_no; 662 const __be32 *chips_no;
@@ -841,7 +837,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
841 dev_set_drvdata(dev, mtd); 837 dev_set_drvdata(dev, mtd);
842 838
843 /* Register device in MTD */ 839 /* Register device in MTD */
844#ifdef CONFIG_MTD_PARTITIONS
845 retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0); 840 retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
846#ifdef CONFIG_MTD_OF_PARTS 841#ifdef CONFIG_MTD_OF_PARTS
847 if (retval == 0) 842 if (retval == 0)
@@ -854,12 +849,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
854 goto error; 849 goto error;
855 } 850 }
856 851
857 if (retval > 0) 852 retval = mtd_device_register(mtd, parts, retval);
858 retval = add_mtd_partitions(mtd, parts, retval);
859 else
860#endif
861 retval = add_mtd_device(mtd);
862
863 if (retval) { 853 if (retval) {
864 dev_err(dev, "Error adding MTD device!\n"); 854 dev_err(dev, "Error adding MTD device!\n");
865 devm_free_irq(dev, prv->irq, mtd); 855 devm_free_irq(dev, prv->irq, mtd);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 42a95fb41504..90df34c4d26c 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -56,8 +56,14 @@
56#define NFC_V1_V2_WRPROT (host->regs + 0x12) 56#define NFC_V1_V2_WRPROT (host->regs + 0x12)
57#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) 57#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
58#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) 58#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
59#define NFC_V21_UNLOCKSTART_BLKADDR (host->regs + 0x20)
60#define NFC_V21_UNLOCKEND_BLKADDR (host->regs + 0x22)
59#define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20)
60#define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24)
61#define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28)
62#define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c)
63#define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22)
64#define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26)
65#define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a)
66#define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e)
61#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18) 67#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
62#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a) 68#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
63#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c) 69#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
@@ -152,6 +158,7 @@ struct mxc_nand_host {
152 int clk_act; 158 int clk_act;
153 int irq; 159 int irq;
154 int eccsize; 160 int eccsize;
161 int active_cs;
155 162
156 struct completion op_completion; 163 struct completion op_completion;
157 164
@@ -236,9 +243,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
236 } 243 }
237}; 244};
238 245
239#ifdef CONFIG_MTD_PARTITIONS
240static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; 246static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
241#endif
242 247
243static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) 248static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
244{ 249{
@@ -445,7 +450,7 @@ static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
445 for (i = 0; i < bufs; i++) { 450 for (i = 0; i < bufs; i++) {
446 451
447 /* NANDFC buffer 0 is used for page read/write */ 452 /* NANDFC buffer 0 is used for page read/write */
448 writew(i, NFC_V1_V2_BUF_ADDR); 453 writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
449 454
450 writew(ops, NFC_V1_V2_CONFIG2); 455 writew(ops, NFC_V1_V2_CONFIG2);
451 456
@@ -470,7 +475,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
470 struct nand_chip *this = &host->nand; 475 struct nand_chip *this = &host->nand;
471 476
472 /* NANDFC buffer 0 is used for device ID output */ 477 /* NANDFC buffer 0 is used for device ID output */
473 writew(0x0, NFC_V1_V2_BUF_ADDR); 478 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
474 479
475 writew(NFC_ID, NFC_V1_V2_CONFIG2); 480 writew(NFC_ID, NFC_V1_V2_CONFIG2);
476 481
@@ -505,7 +510,7 @@ static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
505 uint32_t store; 510 uint32_t store;
506 uint16_t ret; 511 uint16_t ret;
507 512
508 writew(0x0, NFC_V1_V2_BUF_ADDR); 513 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
509 514
510 /* 515 /*
511 * The device status is stored in main_area0. To 516 * The device status is stored in main_area0. To
@@ -686,24 +691,24 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
686 struct nand_chip *nand_chip = mtd->priv; 691 struct nand_chip *nand_chip = mtd->priv;
687 struct mxc_nand_host *host = nand_chip->priv; 692 struct mxc_nand_host *host = nand_chip->priv;
688 693
689 switch (chip) { 694 if (chip == -1) {
690 case -1:
691 /* Disable the NFC clock */ 695 /* Disable the NFC clock */
692 if (host->clk_act) { 696 if (host->clk_act) {
693 clk_disable(host->clk); 697 clk_disable(host->clk);
694 host->clk_act = 0; 698 host->clk_act = 0;
695 } 699 }
696 break; 700 return;
697 case 0: 701 }
702
703 if (!host->clk_act) {
698 /* Enable the NFC clock */ 704 /* Enable the NFC clock */
699 if (!host->clk_act) { 705 clk_enable(host->clk);
700 clk_enable(host->clk); 706 host->clk_act = 1;
701 host->clk_act = 1; 707 }
702 }
703 break;
704 708
705 default: 709 if (nfc_is_v21()) {
706 break; 710 host->active_cs = chip;
711 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
707 } 712 }
708} 713}
709 714
@@ -834,8 +839,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
834 839
835 /* Blocks to be unlocked */ 840 /* Blocks to be unlocked */
836 if (nfc_is_v21()) { 841 if (nfc_is_v21()) {
837 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR); 842 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
838 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR); 843 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
844 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
845 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
846 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
847 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
848 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
849 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
839 } else if (nfc_is_v1()) { 850 } else if (nfc_is_v1()) {
840 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR); 851 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
841 writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR); 852 writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
@@ -1200,7 +1211,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1200 irq_control_v1_v2(host, 1); 1211 irq_control_v1_v2(host, 1);
1201 1212
1202 /* first scan to find the device and get the page size */ 1213 /* first scan to find the device and get the page size */
1203 if (nand_scan_ident(mtd, 1, NULL)) { 1214 if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
1204 err = -ENXIO; 1215 err = -ENXIO;
1205 goto escan; 1216 goto escan;
1206 } 1217 }
@@ -1220,18 +1231,15 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1220 } 1231 }
1221 1232
1222 /* Register the partitions */ 1233 /* Register the partitions */
1223#ifdef CONFIG_MTD_PARTITIONS
1224 nr_parts = 1234 nr_parts =
1225 parse_mtd_partitions(mtd, part_probes, &host->parts, 0); 1235 parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
1226 if (nr_parts > 0) 1236 if (nr_parts > 0)
1227 add_mtd_partitions(mtd, host->parts, nr_parts); 1237 mtd_device_register(mtd, host->parts, nr_parts);
1228 else if (pdata->parts) 1238 else if (pdata->parts)
1229 add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 1239 mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
1230 else 1240 else {
1231#endif
1232 {
1233 pr_info("Registering %s as whole device\n", mtd->name); 1241 pr_info("Registering %s as whole device\n", mtd->name);
1234 add_mtd_device(mtd); 1242 mtd_device_register(mtd, NULL, 0);
1235 } 1243 }
1236 1244
1237 platform_set_drvdata(pdev, host); 1245 platform_set_drvdata(pdev, host);
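
The mxc_nand changes above teach the v2.1 controller about multiple chip selects: four unlock-address register pairs, an active_cs field, up to four chips passed to nand_scan_ident(), and the chip select folded into the buffer-address register. A small sketch of that last point, assuming only what the hunk shows (the helper name is invented):

    /* Sketch: on v2.1 the upper nibble of NFC_V1_V2_BUF_ADDR selects the
     * active chip select, the low bits still pick the RAM buffer. */
    static void mxc_set_buf(struct mxc_nand_host *host, int buf)
    {
    	writew((host->active_cs << 4) | buf, NFC_V1_V2_BUF_ADDR);
    }
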
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index c54a4cbac6bc..a46e9bb847bd 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -47,10 +47,7 @@
47#include <linux/bitops.h> 47#include <linux/bitops.h>
48#include <linux/leds.h> 48#include <linux/leds.h>
49#include <linux/io.h> 49#include <linux/io.h>
50
51#ifdef CONFIG_MTD_PARTITIONS
52#include <linux/mtd/partitions.h> 50#include <linux/mtd/partitions.h>
53#endif
54 51
55/* Define default oob placement schemes for large and small page devices */ 52/* Define default oob placement schemes for large and small page devices */
56static struct nand_ecclayout nand_oob_8 = { 53static struct nand_ecclayout nand_oob_8 = {
@@ -976,9 +973,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
976 ret = __nand_unlock(mtd, ofs, len, 0); 973 ret = __nand_unlock(mtd, ofs, len, 0);
977 974
978out: 975out:
979 /* de-select the NAND device */
980 chip->select_chip(mtd, -1);
981
982 nand_release_device(mtd); 976 nand_release_device(mtd);
983 977
984 return ret; 978 return ret;
@@ -1046,9 +1040,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1046 ret = __nand_unlock(mtd, ofs, len, 0x1); 1040 ret = __nand_unlock(mtd, ofs, len, 0x1);
1047 1041
1048out: 1042out:
1049 /* de-select the NAND device */
1050 chip->select_chip(mtd, -1);
1051
1052 nand_release_device(mtd); 1043 nand_release_device(mtd);
1053 1044
1054 return ret; 1045 return ret;
@@ -3112,6 +3103,8 @@ ident_done:
3112 chip->chip_shift += 32 - 1; 3103 chip->chip_shift += 32 - 1;
3113 } 3104 }
3114 3105
3106 chip->badblockbits = 8;
3107
3115 /* Set the bad block position */ 3108 /* Set the bad block position */
3116 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16)) 3109 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
3117 chip->badblockpos = NAND_LARGE_BADBLOCK_POS; 3110 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
@@ -3539,12 +3532,7 @@ void nand_release(struct mtd_info *mtd)
3539 if (chip->ecc.mode == NAND_ECC_SOFT_BCH) 3532 if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
3540 nand_bch_free((struct nand_bch_control *)chip->ecc.priv); 3533 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
3541 3534
3542#ifdef CONFIG_MTD_PARTITIONS 3535 mtd_device_unregister(mtd);
3543 /* Deregister partitions */
3544 del_mtd_partitions(mtd);
3545#endif
3546 /* Deregister the device */
3547 del_mtd_device(mtd);
3548 3536
3549 /* Free bad block table memory */ 3537 /* Free bad block table memory */
3550 kfree(chip->bbt); 3538 kfree(chip->bbt);
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index af46428286fe..ccbeaa1e4a8e 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1276,20 +1276,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1276 * while scanning a device for factory marked good / bad blocks. */ 1276 * while scanning a device for factory marked good / bad blocks. */
1277static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 1277static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
1278 1278
1279static struct nand_bbt_descr smallpage_flashbased = {
1280 .options = NAND_BBT_SCAN2NDPAGE,
1281 .offs = NAND_SMALL_BADBLOCK_POS,
1282 .len = 1,
1283 .pattern = scan_ff_pattern
1284};
1285
1286static struct nand_bbt_descr largepage_flashbased = {
1287 .options = NAND_BBT_SCAN2NDPAGE,
1288 .offs = NAND_LARGE_BADBLOCK_POS,
1289 .len = 2,
1290 .pattern = scan_ff_pattern
1291};
1292
1293static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; 1279static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
1294 1280
1295static struct nand_bbt_descr agand_flashbased = { 1281static struct nand_bbt_descr agand_flashbased = {
@@ -1355,10 +1341,6 @@ static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
1355 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when 1341 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
1356 * passed to this function. 1342 * passed to this function.
1357 * 1343 *
1358 * TODO: Handle other flags, replace other static structs
1359 * (e.g. handle NAND_BBT_FLASH for flash-based BBT,
1360 * replace smallpage_flashbased)
1361 *
1362 */ 1344 */
1363static int nand_create_default_bbt_descr(struct nand_chip *this) 1345static int nand_create_default_bbt_descr(struct nand_chip *this)
1364{ 1346{
@@ -1422,15 +1404,14 @@ int nand_default_bbt(struct mtd_info *mtd)
1422 this->bbt_md = &bbt_mirror_descr; 1404 this->bbt_md = &bbt_mirror_descr;
1423 } 1405 }
1424 } 1406 }
1425 if (!this->badblock_pattern) {
1426 this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
1427 }
1428 } else { 1407 } else {
1429 this->bbt_td = NULL; 1408 this->bbt_td = NULL;
1430 this->bbt_md = NULL; 1409 this->bbt_md = NULL;
1431 if (!this->badblock_pattern)
1432 nand_create_default_bbt_descr(this);
1433 } 1410 }
1411
1412 if (!this->badblock_pattern)
1413 nand_create_default_bbt_descr(this);
1414
1434 return nand_scan_bbt(mtd, this->badblock_pattern); 1415 return nand_scan_bbt(mtd, this->badblock_pattern);
1435} 1416}
1436 1417
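
With the static smallpage_flashbased and largepage_flashbased descriptors gone, nand_default_bbt() now falls through to nand_create_default_bbt_descr() for flash-based and RAM-based bad-block tables alike. Judging from the removed structures, the generated default is expected to carry the same information; a sketch of that shape (not the actual generated descriptor):

    static struct nand_bbt_descr example_badblock_descr = {
    	.options = NAND_BBT_SCAN2NDPAGE,
    	.offs    = NAND_LARGE_BADBLOCK_POS,	/* NAND_SMALL_BADBLOCK_POS for small-page parts */
    	.len     = 2,				/* 1 for small-page parts */
    	.pattern = scan_ff_pattern,		/* { 0xff, 0xff } */
    };
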
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 893d95bfea48..357e8c5252a8 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2383,7 +2383,9 @@ static int __init ns_init_module(void)
2383 goto err_exit; 2383 goto err_exit;
2384 2384
2385 /* Register NAND partitions */ 2385 /* Register NAND partitions */
2386 if ((retval = add_mtd_partitions(nsmtd, &nand->partitions[0], nand->nbparts)) != 0) 2386 retval = mtd_device_register(nsmtd, &nand->partitions[0],
2387 nand->nbparts);
2388 if (retval != 0)
2387 goto err_exit; 2389 goto err_exit;
2388 2390
2389 return 0; 2391 return 0;
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index bbe6d451290d..ea2dea8a9c88 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -33,6 +33,7 @@
33#include <linux/of_platform.h> 33#include <linux/of_platform.h>
34#include <asm/io.h> 34#include <asm/io.h>
35 35
36#define NDFC_MAX_CS 4
36 37
37struct ndfc_controller { 38struct ndfc_controller {
38 struct platform_device *ofdev; 39 struct platform_device *ofdev;
@@ -41,17 +42,16 @@ struct ndfc_controller {
41 struct nand_chip chip; 42 struct nand_chip chip;
42 int chip_select; 43 int chip_select;
43 struct nand_hw_control ndfc_control; 44 struct nand_hw_control ndfc_control;
44#ifdef CONFIG_MTD_PARTITIONS
45 struct mtd_partition *parts; 45 struct mtd_partition *parts;
46#endif
47}; 46};
48 47
49static struct ndfc_controller ndfc_ctrl; 48static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
50 49
51static void ndfc_select_chip(struct mtd_info *mtd, int chip) 50static void ndfc_select_chip(struct mtd_info *mtd, int chip)
52{ 51{
53 uint32_t ccr; 52 uint32_t ccr;
54 struct ndfc_controller *ndfc = &ndfc_ctrl; 53 struct nand_chip *nchip = mtd->priv;
54 struct ndfc_controller *ndfc = nchip->priv;
55 55
56 ccr = in_be32(ndfc->ndfcbase + NDFC_CCR); 56 ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
57 if (chip >= 0) { 57 if (chip >= 0) {
@@ -64,7 +64,8 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
64 64
65static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) 65static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
66{ 66{
67 struct ndfc_controller *ndfc = &ndfc_ctrl; 67 struct nand_chip *chip = mtd->priv;
68 struct ndfc_controller *ndfc = chip->priv;
68 69
69 if (cmd == NAND_CMD_NONE) 70 if (cmd == NAND_CMD_NONE)
70 return; 71 return;
@@ -77,7 +78,8 @@ static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
77 78
78static int ndfc_ready(struct mtd_info *mtd) 79static int ndfc_ready(struct mtd_info *mtd)
79{ 80{
80 struct ndfc_controller *ndfc = &ndfc_ctrl; 81 struct nand_chip *chip = mtd->priv;
82 struct ndfc_controller *ndfc = chip->priv;
81 83
82 return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY; 84 return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
83} 85}
@@ -85,7 +87,8 @@ static int ndfc_ready(struct mtd_info *mtd)
85static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode) 87static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
86{ 88{
87 uint32_t ccr; 89 uint32_t ccr;
88 struct ndfc_controller *ndfc = &ndfc_ctrl; 90 struct nand_chip *chip = mtd->priv;
91 struct ndfc_controller *ndfc = chip->priv;
89 92
90 ccr = in_be32(ndfc->ndfcbase + NDFC_CCR); 93 ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
91 ccr |= NDFC_CCR_RESET_ECC; 94 ccr |= NDFC_CCR_RESET_ECC;
@@ -96,7 +99,8 @@ static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
96static int ndfc_calculate_ecc(struct mtd_info *mtd, 99static int ndfc_calculate_ecc(struct mtd_info *mtd,
97 const u_char *dat, u_char *ecc_code) 100 const u_char *dat, u_char *ecc_code)
98{ 101{
99 struct ndfc_controller *ndfc = &ndfc_ctrl; 102 struct nand_chip *chip = mtd->priv;
103 struct ndfc_controller *ndfc = chip->priv;
100 uint32_t ecc; 104 uint32_t ecc;
101 uint8_t *p = (uint8_t *)&ecc; 105 uint8_t *p = (uint8_t *)&ecc;
102 106
@@ -119,7 +123,8 @@ static int ndfc_calculate_ecc(struct mtd_info *mtd,
119 */ 123 */
120static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 124static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
121{ 125{
122 struct ndfc_controller *ndfc = &ndfc_ctrl; 126 struct nand_chip *chip = mtd->priv;
127 struct ndfc_controller *ndfc = chip->priv;
123 uint32_t *p = (uint32_t *) buf; 128 uint32_t *p = (uint32_t *) buf;
124 129
125 for(;len > 0; len -= 4) 130 for(;len > 0; len -= 4)
@@ -128,7 +133,8 @@ static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
128 133
129static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 134static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
130{ 135{
131 struct ndfc_controller *ndfc = &ndfc_ctrl; 136 struct nand_chip *chip = mtd->priv;
137 struct ndfc_controller *ndfc = chip->priv;
132 uint32_t *p = (uint32_t *) buf; 138 uint32_t *p = (uint32_t *) buf;
133 139
134 for(;len > 0; len -= 4) 140 for(;len > 0; len -= 4)
@@ -137,7 +143,8 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
137 143
138static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 144static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
139{ 145{
140 struct ndfc_controller *ndfc = &ndfc_ctrl; 146 struct nand_chip *chip = mtd->priv;
147 struct ndfc_controller *ndfc = chip->priv;
141 uint32_t *p = (uint32_t *) buf; 148 uint32_t *p = (uint32_t *) buf;
142 149
143 for(;len > 0; len -= 4) 150 for(;len > 0; len -= 4)
@@ -152,13 +159,11 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
152static int ndfc_chip_init(struct ndfc_controller *ndfc, 159static int ndfc_chip_init(struct ndfc_controller *ndfc,
153 struct device_node *node) 160 struct device_node *node)
154{ 161{
155#ifdef CONFIG_MTD_PARTITIONS
156#ifdef CONFIG_MTD_CMDLINE_PARTS 162#ifdef CONFIG_MTD_CMDLINE_PARTS
157 static const char *part_types[] = { "cmdlinepart", NULL }; 163 static const char *part_types[] = { "cmdlinepart", NULL };
158#else 164#else
159 static const char *part_types[] = { NULL }; 165 static const char *part_types[] = { NULL };
160#endif 166#endif
161#endif
162 struct device_node *flash_np; 167 struct device_node *flash_np;
163 struct nand_chip *chip = &ndfc->chip; 168 struct nand_chip *chip = &ndfc->chip;
164 int ret; 169 int ret;
@@ -179,6 +184,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
179 chip->ecc.mode = NAND_ECC_HW; 184 chip->ecc.mode = NAND_ECC_HW;
180 chip->ecc.size = 256; 185 chip->ecc.size = 256;
181 chip->ecc.bytes = 3; 186 chip->ecc.bytes = 3;
187 chip->priv = ndfc;
182 188
183 ndfc->mtd.priv = chip; 189 ndfc->mtd.priv = chip;
184 ndfc->mtd.owner = THIS_MODULE; 190 ndfc->mtd.owner = THIS_MODULE;
@@ -198,25 +204,18 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
198 if (ret) 204 if (ret)
199 goto err; 205 goto err;
200 206
201#ifdef CONFIG_MTD_PARTITIONS
202 ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0); 207 ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
203 if (ret < 0) 208 if (ret < 0)
204 goto err; 209 goto err;
205 210
206#ifdef CONFIG_MTD_OF_PARTS
207 if (ret == 0) { 211 if (ret == 0) {
208 ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np, 212 ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
209 &ndfc->parts); 213 &ndfc->parts);
210 if (ret < 0) 214 if (ret < 0)
211 goto err; 215 goto err;
212 } 216 }
213#endif
214 217
215 if (ret > 0) 218 ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
216 ret = add_mtd_partitions(&ndfc->mtd, ndfc->parts, ret);
217 else
218#endif
219 ret = add_mtd_device(&ndfc->mtd);
220 219
221err: 220err:
222 of_node_put(flash_np); 221 of_node_put(flash_np);
@@ -227,15 +226,10 @@ err:
227 226
228static int __devinit ndfc_probe(struct platform_device *ofdev) 227static int __devinit ndfc_probe(struct platform_device *ofdev)
229{ 228{
230 struct ndfc_controller *ndfc = &ndfc_ctrl; 229 struct ndfc_controller *ndfc;
231 const __be32 *reg; 230 const __be32 *reg;
232 u32 ccr; 231 u32 ccr;
233 int err, len; 232 int err, len, cs;
234
235 spin_lock_init(&ndfc->ndfc_control.lock);
236 init_waitqueue_head(&ndfc->ndfc_control.wq);
237 ndfc->ofdev = ofdev;
238 dev_set_drvdata(&ofdev->dev, ndfc);
239 233
240 /* Read the reg property to get the chip select */ 234 /* Read the reg property to get the chip select */
241 reg = of_get_property(ofdev->dev.of_node, "reg", &len); 235 reg = of_get_property(ofdev->dev.of_node, "reg", &len);
@@ -243,7 +237,20 @@ static int __devinit ndfc_probe(struct platform_device *ofdev)
243 dev_err(&ofdev->dev, "unable read reg property (%d)\n", len); 237 dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
244 return -ENOENT; 238 return -ENOENT;
245 } 239 }
246 ndfc->chip_select = be32_to_cpu(reg[0]); 240
241 cs = be32_to_cpu(reg[0]);
242 if (cs >= NDFC_MAX_CS) {
243 dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
244 return -EINVAL;
245 }
246
247 ndfc = &ndfc_ctrl[cs];
248 ndfc->chip_select = cs;
249
250 spin_lock_init(&ndfc->ndfc_control.lock);
251 init_waitqueue_head(&ndfc->ndfc_control.wq);
252 ndfc->ofdev = ofdev;
253 dev_set_drvdata(&ofdev->dev, ndfc);
247 254
248 ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0); 255 ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
249 if (!ndfc->ndfcbase) { 256 if (!ndfc->ndfcbase) {
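
The ndfc rework above drops the single global controller in favour of an ndfc_ctrl[NDFC_MAX_CS] array indexed by chip select, and stores the controller in chip->priv during ndfc_chip_init() so each callback can find its own instance. A sketch of the lookup the callbacks now perform (the helper name is mine; the body mirrors the hunks):

    static struct ndfc_controller *mtd_to_ndfc(struct mtd_info *mtd)
    {
    	struct nand_chip *chip = mtd->priv;

    	return chip->priv;	/* set in ndfc_chip_init(): chip->priv = ndfc */
    }
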
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index a045a4a581b6..b6a5c86ab31e 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -158,12 +158,7 @@ static int nomadik_nand_probe(struct platform_device *pdev)
158 goto err_unmap; 158 goto err_unmap;
159 } 159 }
160 160
161#ifdef CONFIG_MTD_PARTITIONS 161 mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
162 add_mtd_partitions(&host->mtd, pdata->parts, pdata->nparts);
163#else
164 pr_info("Registering %s as whole device\n", mtd->name);
165 add_mtd_device(mtd);
166#endif
167 162
168 platform_set_drvdata(pdev, host); 163 platform_set_drvdata(pdev, host);
169 return 0; 164 return 0;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 6eddf7361ed7..9c30a0b03171 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -321,8 +321,8 @@ static int __devinit nuc900_nand_probe(struct platform_device *pdev)
321 goto fail3; 321 goto fail3;
322 } 322 }
323 323
324 add_mtd_partitions(&(nuc900_nand->mtd), partitions, 324 mtd_device_register(&(nuc900_nand->mtd), partitions,
325 ARRAY_SIZE(partitions)); 325 ARRAY_SIZE(partitions));
326 326
327 platform_set_drvdata(pdev, nuc900_nand); 327 platform_set_drvdata(pdev, nuc900_nand);
328 328
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index da9a351c9d79..0db2c0e7656a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -94,9 +94,7 @@
94#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0) 94#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
95#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1) 95#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
96 96
97#ifdef CONFIG_MTD_PARTITIONS
98static const char *part_probes[] = { "cmdlinepart", NULL }; 97static const char *part_probes[] = { "cmdlinepart", NULL };
99#endif
100 98
101/* oob info generated runtime depending on ecc algorithm and layout selected */ 99/* oob info generated runtime depending on ecc algorithm and layout selected */
102static struct nand_ecclayout omap_oobinfo; 100static struct nand_ecclayout omap_oobinfo;
@@ -263,11 +261,10 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
263 if (ret) { 261 if (ret) {
264 /* PFPW engine is busy, use cpu copy method */ 262 /* PFPW engine is busy, use cpu copy method */
265 if (info->nand.options & NAND_BUSWIDTH_16) 263 if (info->nand.options & NAND_BUSWIDTH_16)
266 omap_read_buf16(mtd, buf, len); 264 omap_read_buf16(mtd, (u_char *)p, len);
267 else 265 else
268 omap_read_buf8(mtd, buf, len); 266 omap_read_buf8(mtd, (u_char *)p, len);
269 } else { 267 } else {
270 p = (u32 *) buf;
271 do { 268 do {
272 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 269 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
273 r_count = r_count >> 2; 270 r_count = r_count >> 2;
@@ -293,7 +290,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
293 struct omap_nand_info, mtd); 290 struct omap_nand_info, mtd);
294 uint32_t w_count = 0; 291 uint32_t w_count = 0;
295 int i = 0, ret = 0; 292 int i = 0, ret = 0;
296 u16 *p; 293 u16 *p = (u16 *)buf;
297 unsigned long tim, limit; 294 unsigned long tim, limit;
298 295
299 /* take care of subpage writes */ 296 /* take care of subpage writes */
@@ -309,11 +306,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
309 if (ret) { 306 if (ret) {
310 /* PFPW engine is busy, use cpu copy method */ 307 /* PFPW engine is busy, use cpu copy method */
311 if (info->nand.options & NAND_BUSWIDTH_16) 308 if (info->nand.options & NAND_BUSWIDTH_16)
312 omap_write_buf16(mtd, buf, len); 309 omap_write_buf16(mtd, (u_char *)p, len);
313 else 310 else
314 omap_write_buf8(mtd, buf, len); 311 omap_write_buf8(mtd, (u_char *)p, len);
315 } else { 312 } else {
316 p = (u16 *) buf;
317 while (len) { 313 while (len) {
318 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 314 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
319 w_count = w_count >> 1; 315 w_count = w_count >> 1;
@@ -1073,9 +1069,9 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1073 /* DIP switches on some boards change between 8 and 16 bit 1069 /* DIP switches on some boards change between 8 and 16 bit
1074 * bus widths for flash. Try the other width if the first try fails. 1070 * bus widths for flash. Try the other width if the first try fails.
1075 */ 1071 */
1076 if (nand_scan(&info->mtd, 1)) { 1072 if (nand_scan_ident(&info->mtd, 1, NULL)) {
1077 info->nand.options ^= NAND_BUSWIDTH_16; 1073 info->nand.options ^= NAND_BUSWIDTH_16;
1078 if (nand_scan(&info->mtd, 1)) { 1074 if (nand_scan_ident(&info->mtd, 1, NULL)) {
1079 err = -ENXIO; 1075 err = -ENXIO;
1080 goto out_release_mem_region; 1076 goto out_release_mem_region;
1081 } 1077 }
@@ -1101,15 +1097,19 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1101 info->nand.ecc.layout = &omap_oobinfo; 1097 info->nand.ecc.layout = &omap_oobinfo;
1102 } 1098 }
1103 1099
1104#ifdef CONFIG_MTD_PARTITIONS
1100 /* second phase scan */
1101 if (nand_scan_tail(&info->mtd)) {
1102 err = -ENXIO;
1103 goto out_release_mem_region;
1104 }
1105
1105 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 1106 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
1106 if (err > 0) 1107 if (err > 0)
1107 add_mtd_partitions(&info->mtd, info->parts, err); 1108 mtd_device_register(&info->mtd, info->parts, err);
1108 else if (pdata->parts) 1109 else if (pdata->parts)
1109 add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts); 1110 mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
1110 else 1111 else
1111#endif 1112 mtd_device_register(&info->mtd, NULL, 0);
1112 add_mtd_device(&info->mtd);
1113 1113
1114 platform_set_drvdata(pdev, &info->mtd); 1114 platform_set_drvdata(pdev, &info->mtd);
1115 1115
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index da6e75343052..7794d0680f91 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,9 +21,7 @@
21#include <mach/hardware.h> 21#include <mach/hardware.h>
22#include <plat/orion_nand.h> 22#include <plat/orion_nand.h>
23 23
24#ifdef CONFIG_MTD_CMDLINE_PARTS
25static const char *part_probes[] = { "cmdlinepart", NULL }; 24static const char *part_probes[] = { "cmdlinepart", NULL };
26#endif
27 25
28static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 26static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
29{ 27{
@@ -83,10 +81,8 @@ static int __init orion_nand_probe(struct platform_device *pdev)
83 struct resource *res; 81 struct resource *res;
84 void __iomem *io_base; 82 void __iomem *io_base;
85 int ret = 0; 83 int ret = 0;
86#ifdef CONFIG_MTD_PARTITIONS
87 struct mtd_partition *partitions = NULL; 84 struct mtd_partition *partitions = NULL;
88 int num_part = 0; 85 int num_part = 0;
89#endif
90 86
91 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL); 87 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
92 if (!nc) { 88 if (!nc) {
@@ -136,7 +132,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
136 goto no_dev; 132 goto no_dev;
137 } 133 }
138 134
139#ifdef CONFIG_MTD_PARTITIONS
140#ifdef CONFIG_MTD_CMDLINE_PARTS 135#ifdef CONFIG_MTD_CMDLINE_PARTS
141 mtd->name = "orion_nand"; 136 mtd->name = "orion_nand";
142 num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 137 num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
@@ -147,14 +142,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
147 partitions = board->parts; 142 partitions = board->parts;
148 } 143 }
149 144
150 if (partitions && num_part > 0) 145 ret = mtd_device_register(mtd, partitions, num_part);
151 ret = add_mtd_partitions(mtd, partitions, num_part);
152 else
153 ret = add_mtd_device(mtd);
154#else
155 ret = add_mtd_device(mtd);
156#endif
157
158 if (ret) { 146 if (ret) {
159 nand_release(mtd); 147 nand_release(mtd);
160 goto no_dev; 148 goto no_dev;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 20bfe5f15afd..b1aa41b8a4eb 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -163,7 +163,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
163 goto out_lpc; 163 goto out_lpc;
164 } 164 }
165 165
166 if (add_mtd_device(pasemi_nand_mtd)) { 166 if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
167 printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n"); 167 printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
168 err = -ENODEV; 168 err = -ENODEV;
169 goto out_lpc; 169 goto out_lpc;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index caf5a736340a..633c04bf76f6 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -21,10 +21,8 @@ struct plat_nand_data {
21 struct nand_chip chip; 21 struct nand_chip chip;
22 struct mtd_info mtd; 22 struct mtd_info mtd;
23 void __iomem *io_base; 23 void __iomem *io_base;
24#ifdef CONFIG_MTD_PARTITIONS
25 int nr_parts; 24 int nr_parts;
26 struct mtd_partition *parts; 25 struct mtd_partition *parts;
27#endif
28}; 26};
29 27
30/* 28/*
@@ -101,13 +99,12 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
101 goto out; 99 goto out;
102 } 100 }
103 101
104#ifdef CONFIG_MTD_PARTITIONS
105 if (pdata->chip.part_probe_types) { 102 if (pdata->chip.part_probe_types) {
106 err = parse_mtd_partitions(&data->mtd, 103 err = parse_mtd_partitions(&data->mtd,
107 pdata->chip.part_probe_types, 104 pdata->chip.part_probe_types,
108 &data->parts, 0); 105 &data->parts, 0);
109 if (err > 0) { 106 if (err > 0) {
110 add_mtd_partitions(&data->mtd, data->parts, err); 107 mtd_device_register(&data->mtd, data->parts, err);
111 return 0; 108 return 0;
112 } 109 }
113 } 110 }
@@ -115,11 +112,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
115 pdata->chip.set_parts(data->mtd.size, &pdata->chip); 112 pdata->chip.set_parts(data->mtd.size, &pdata->chip);
116 if (pdata->chip.partitions) { 113 if (pdata->chip.partitions) {
117 data->parts = pdata->chip.partitions; 114 data->parts = pdata->chip.partitions;
118 err = add_mtd_partitions(&data->mtd, data->parts, 115 err = mtd_device_register(&data->mtd, data->parts,
119 pdata->chip.nr_partitions); 116 pdata->chip.nr_partitions);
120 } else 117 } else
121#endif 118 err = mtd_device_register(&data->mtd, NULL, 0);
122 err = add_mtd_device(&data->mtd);
123 119
124 if (!err) 120 if (!err)
125 return err; 121 return err;
@@ -149,10 +145,8 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
149 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 145 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
150 146
151 nand_release(&data->mtd); 147 nand_release(&data->mtd);
152#ifdef CONFIG_MTD_PARTITIONS
153 if (data->parts && data->parts != pdata->chip.partitions) 148 if (data->parts && data->parts != pdata->chip.partitions)
154 kfree(data->parts); 149 kfree(data->parts);
155#endif
156 if (pdata->ctrl.remove) 150 if (pdata->ctrl.remove)
157 pdata->ctrl.remove(pdev); 151 pdata->ctrl.remove(pdev);
158 iounmap(data->io_base); 152 iounmap(data->io_base);
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index cc8658431851..3bbb796b451c 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -73,7 +73,6 @@ __setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
73__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase); 73__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
74#endif 74#endif
75 75
76#ifdef CONFIG_MTD_PARTITIONS
77/* 76/*
78 * Define static partitions for flash devices 77 * Define static partitions for flash devices
79 */ 78 */
@@ -101,7 +100,6 @@ static struct mtd_partition partition_info_evb[] = {
101#define NUM_PARTITIONS 1 100#define NUM_PARTITIONS 1
102 101
103extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id); 102extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
104#endif
105 103
106/* 104/*
107 * hardware specific access to control-lines 105 * hardware specific access to control-lines
@@ -189,10 +187,8 @@ static int ppchameleonevb_device_ready(struct mtd_info *minfo)
189} 187}
190#endif 188#endif
191 189
192#ifdef CONFIG_MTD_PARTITIONS
193const char *part_probes[] = { "cmdlinepart", NULL }; 190const char *part_probes[] = { "cmdlinepart", NULL };
194const char *part_probes_evb[] = { "cmdlinepart", NULL }; 191const char *part_probes_evb[] = { "cmdlinepart", NULL };
195#endif
196 192
197/* 193/*
198 * Main initialization routine 194 * Main initialization routine
@@ -284,14 +280,13 @@ static int __init ppchameleonevb_init(void)
284 this->chip_delay = NAND_SMALL_DELAY_US; 280 this->chip_delay = NAND_SMALL_DELAY_US;
285#endif 281#endif
286 282
287#ifdef CONFIG_MTD_PARTITIONS
288 ppchameleon_mtd->name = "ppchameleon-nand"; 283 ppchameleon_mtd->name = "ppchameleon-nand";
289 mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0); 284 mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
290 if (mtd_parts_nb > 0) 285 if (mtd_parts_nb > 0)
291 part_type = "command line"; 286 part_type = "command line";
292 else 287 else
293 mtd_parts_nb = 0; 288 mtd_parts_nb = 0;
294#endif 289
295 if (mtd_parts_nb == 0) { 290 if (mtd_parts_nb == 0) {
296 if (ppchameleon_mtd->size == NAND_SMALL_SIZE) 291 if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
297 mtd_parts = partition_info_me; 292 mtd_parts = partition_info_me;
@@ -303,7 +298,7 @@ static int __init ppchameleonevb_init(void)
303 298
304 /* Register the partitions */ 299 /* Register the partitions */
305 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 300 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
306 add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb); 301 mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
307 302
308 nand_evb_init: 303 nand_evb_init:
309 /**************************** 304 /****************************
@@ -385,14 +380,14 @@ static int __init ppchameleonevb_init(void)
385 iounmap(ppchameleon_fio_base); 380 iounmap(ppchameleon_fio_base);
386 return -ENXIO; 381 return -ENXIO;
387 } 382 }
388#ifdef CONFIG_MTD_PARTITIONS 383
389 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; 384 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
390 mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0); 385 mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
391 if (mtd_parts_nb > 0) 386 if (mtd_parts_nb > 0)
392 part_type = "command line"; 387 part_type = "command line";
393 else 388 else
394 mtd_parts_nb = 0; 389 mtd_parts_nb = 0;
395#endif 390
396 if (mtd_parts_nb == 0) { 391 if (mtd_parts_nb == 0) {
397 mtd_parts = partition_info_evb; 392 mtd_parts = partition_info_evb;
398 mtd_parts_nb = NUM_PARTITIONS; 393 mtd_parts_nb = NUM_PARTITIONS;
@@ -401,7 +396,7 @@ static int __init ppchameleonevb_init(void)
401 396
402 /* Register the partitions */ 397 /* Register the partitions */
403 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 398 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
404 add_mtd_partitions(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb); 399 mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
405 400
406 /* Return happy */ 401 /* Return happy */
407 return 0; 402 return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index ff0701276d65..1fb3b3a80581 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1119,10 +1119,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1119 clk_put(info->clk); 1119 clk_put(info->clk);
1120 1120
1121 if (mtd) { 1121 if (mtd) {
1122 del_mtd_device(mtd); 1122 mtd_device_unregister(mtd);
1123#ifdef CONFIG_MTD_PARTITIONS
1124 del_mtd_partitions(mtd);
1125#endif
1126 kfree(mtd); 1123 kfree(mtd);
1127 } 1124 }
1128 return 0; 1125 return 0;
@@ -1149,7 +1146,6 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1149 return -ENODEV; 1146 return -ENODEV;
1150 } 1147 }
1151 1148
1152#ifdef CONFIG_MTD_PARTITIONS
1153 if (mtd_has_cmdlinepart()) { 1149 if (mtd_has_cmdlinepart()) {
1154 const char *probes[] = { "cmdlinepart", NULL }; 1150 const char *probes[] = { "cmdlinepart", NULL };
1155 struct mtd_partition *parts; 1151 struct mtd_partition *parts;
@@ -1158,13 +1154,10 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1158 nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0); 1154 nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
1159 1155
1160 if (nr_parts) 1156 if (nr_parts)
1161 return add_mtd_partitions(info->mtd, parts, nr_parts); 1157 return mtd_device_register(info->mtd, parts, nr_parts);
1162 } 1158 }
1163 1159
1164 return add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts); 1160 return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
1165#else
1166 return 0;
1167#endif
1168} 1161}
1169 1162
1170#ifdef CONFIG_PM 1163#ifdef CONFIG_PM
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index 67440b5beef8..c9f9127ff770 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -580,7 +580,8 @@ static int __init rtc_from4_init(void)
580#endif 580#endif
581 581
582 /* Register the partitions */ 582 /* Register the partitions */
583 ret = add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS); 583 ret = mtd_device_register(rtc_from4_mtd, partition_info,
584 NUM_PARTITIONS);
584 if (ret) 585 if (ret)
585 goto err_3; 586 goto err_3;
586 587
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 33d832dddfdd..4405468f196b 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -55,7 +55,7 @@ static int hardware_ecc = 0;
55#endif 55#endif
56 56
57#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP 57#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
58static int clock_stop = 1; 58static const int clock_stop = 1;
59#else 59#else
60static const int clock_stop = 0; 60static const int clock_stop = 0;
61#endif 61#endif
@@ -96,6 +96,12 @@ enum s3c_cpu_type {
96 TYPE_S3C2440, 96 TYPE_S3C2440,
97}; 97};
98 98
99enum s3c_nand_clk_state {
100 CLOCK_DISABLE = 0,
101 CLOCK_ENABLE,
102 CLOCK_SUSPEND,
103};
104
99/* overview of the s3c2410 nand state */ 105/* overview of the s3c2410 nand state */
100 106
101/** 107/**
@@ -111,6 +117,7 @@ enum s3c_cpu_type {
111 * @mtd_count: The number of MTDs created from this controller. 117 * @mtd_count: The number of MTDs created from this controller.
112 * @save_sel: The contents of @sel_reg to be saved over suspend. 118 * @save_sel: The contents of @sel_reg to be saved over suspend.
113 * @clk_rate: The clock rate from @clk. 119 * @clk_rate: The clock rate from @clk.
120 * @clk_state: The current clock state.
114 * @cpu_type: The exact type of this controller. 121 * @cpu_type: The exact type of this controller.
115 */ 122 */
116struct s3c2410_nand_info { 123struct s3c2410_nand_info {
@@ -129,6 +136,7 @@ struct s3c2410_nand_info {
129 int mtd_count; 136 int mtd_count;
130 unsigned long save_sel; 137 unsigned long save_sel;
131 unsigned long clk_rate; 138 unsigned long clk_rate;
139 enum s3c_nand_clk_state clk_state;
132 140
133 enum s3c_cpu_type cpu_type; 141 enum s3c_cpu_type cpu_type;
134 142
@@ -159,11 +167,33 @@ static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
159 return dev->dev.platform_data; 167 return dev->dev.platform_data;
160} 168}
161 169
162static inline int allow_clk_stop(struct s3c2410_nand_info *info) 170static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
163{ 171{
164 return clock_stop; 172 return clock_stop;
165} 173}
166 174
175/**
176 * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
177 * @info: The controller instance.
178 * @new_state: State to which clock should be set.
179 */
180static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
181 enum s3c_nand_clk_state new_state)
182{
183 if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
184 return;
185
186 if (info->clk_state == CLOCK_ENABLE) {
187 if (new_state != CLOCK_ENABLE)
188 clk_disable(info->clk);
189 } else {
190 if (new_state == CLOCK_ENABLE)
191 clk_enable(info->clk);
192 }
193
194 info->clk_state = new_state;
195}
196
167/* timing calculations */ 197/* timing calculations */
168 198
169#define NS_IN_KHZ 1000000 199#define NS_IN_KHZ 1000000
@@ -333,8 +363,8 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
333 nmtd = this->priv; 363 nmtd = this->priv;
334 info = nmtd->info; 364 info = nmtd->info;
335 365
336 if (chip != -1 && allow_clk_stop(info)) 366 if (chip != -1)
337 clk_enable(info->clk); 367 s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
338 368
339 cur = readl(info->sel_reg); 369 cur = readl(info->sel_reg);
340 370
@@ -356,8 +386,8 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
356 386
357 writel(cur, info->sel_reg); 387 writel(cur, info->sel_reg);
358 388
359 if (chip == -1 && allow_clk_stop(info)) 389 if (chip == -1)
360 clk_disable(info->clk); 390 s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
361} 391}
362 392
363/* s3c2410_nand_hwcontrol 393/* s3c2410_nand_hwcontrol
@@ -694,8 +724,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
694 /* free the common resources */ 724 /* free the common resources */
695 725
696 if (info->clk != NULL && !IS_ERR(info->clk)) { 726 if (info->clk != NULL && !IS_ERR(info->clk)) {
697 if (!allow_clk_stop(info)) 727 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
698 clk_disable(info->clk);
699 clk_put(info->clk); 728 clk_put(info->clk);
700 } 729 }
701 730
@@ -715,7 +744,6 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
715 return 0; 744 return 0;
716} 745}
717 746
718#ifdef CONFIG_MTD_PARTITIONS
719const char *part_probes[] = { "cmdlinepart", NULL }; 747const char *part_probes[] = { "cmdlinepart", NULL };
720static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, 748static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
721 struct s3c2410_nand_mtd *mtd, 749 struct s3c2410_nand_mtd *mtd,
@@ -725,7 +753,7 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
725 int nr_part = 0; 753 int nr_part = 0;
726 754
727 if (set == NULL) 755 if (set == NULL)
728 return add_mtd_device(&mtd->mtd); 756 return mtd_device_register(&mtd->mtd, NULL, 0);
729 757
730 mtd->mtd.name = set->name; 758 mtd->mtd.name = set->name;
731 nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0); 759 nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
@@ -735,19 +763,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
735 part_info = set->partitions; 763 part_info = set->partitions;
736 } 764 }
737 765
738 if (nr_part > 0 && part_info) 766 return mtd_device_register(&mtd->mtd, part_info, nr_part);
739 return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
740
741 return add_mtd_device(&mtd->mtd);
742}
743#else
744static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
745 struct s3c2410_nand_mtd *mtd,
746 struct s3c2410_nand_set *set)
747{
748 return add_mtd_device(&mtd->mtd);
749} 767}
750#endif
751 768
752/** 769/**
753 * s3c2410_nand_init_chip - initialise a single instance of an chip 770 * s3c2410_nand_init_chip - initialise a single instance of an chip
@@ -947,7 +964,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
947 goto exit_error; 964 goto exit_error;
948 } 965 }
949 966
950 clk_enable(info->clk); 967 s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
951 968
952 /* allocate and map the resource */ 969 /* allocate and map the resource */
953 970
@@ -1026,9 +1043,9 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
1026 goto exit_error; 1043 goto exit_error;
1027 } 1044 }
1028 1045
1029 if (allow_clk_stop(info)) { 1046 if (allow_clk_suspend(info)) {
1030 dev_info(&pdev->dev, "clock idle support enabled\n"); 1047 dev_info(&pdev->dev, "clock idle support enabled\n");
1031 clk_disable(info->clk); 1048 s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
1032 } 1049 }
1033 1050
1034 pr_debug("initialised ok\n"); 1051 pr_debug("initialised ok\n");
@@ -1059,8 +1076,7 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
1059 1076
1060 writel(info->save_sel | info->sel_bit, info->sel_reg); 1077 writel(info->save_sel | info->sel_bit, info->sel_reg);
1061 1078
1062 if (!allow_clk_stop(info)) 1079 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
1063 clk_disable(info->clk);
1064 } 1080 }
1065 1081
1066 return 0; 1082 return 0;
@@ -1072,7 +1088,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
1072 unsigned long sel; 1088 unsigned long sel;
1073 1089
1074 if (info) { 1090 if (info) {
1075 clk_enable(info->clk); 1091 s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
1076 s3c2410_nand_inithw(info); 1092 s3c2410_nand_inithw(info);
1077 1093
1078 /* Restore the state of the nFCE line. */ 1094 /* Restore the state of the nFCE line. */
@@ -1082,8 +1098,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
1082 sel |= info->save_sel & info->sel_bit; 1098 sel |= info->save_sel & info->sel_bit;
1083 writel(sel, info->sel_reg); 1099 writel(sel, info->sel_reg);
1084 1100
1085 if (allow_clk_stop(info)) 1101 s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
1086 clk_disable(info->clk);
1087 } 1102 }
1088 1103
1089 return 0; 1104 return 0;
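The s3c2410 hunks replace the scattered allow_clk_stop()/clk_enable()/clk_disable() pairs with a single helper that tracks the clock in a three-state machine (CLOCK_DISABLE, CLOCK_ENABLE, CLOCK_SUSPEND); when clock stopping is not permitted, a request for CLOCK_SUSPEND is simply ignored and the clock stays running. The following is a standalone userspace model of those transitions, not kernel code, just to make the enable/disable bookkeeping explicit:

#include <assert.h>
#include <stdbool.h>

enum clk_state { CLOCK_DISABLE, CLOCK_ENABLE, CLOCK_SUSPEND };

struct model { enum clk_state state; bool allow_suspend; int clk_running; };

/* Mirrors the logic of s3c2410_nand_clk_set_state() in the hunk above. */
static void set_state(struct model *m, enum clk_state new_state)
{
	if (!m->allow_suspend && new_state == CLOCK_SUSPEND)
		return;				/* keep the clock running */
	if (m->state == CLOCK_ENABLE) {
		if (new_state != CLOCK_ENABLE)
			m->clk_running--;	/* clk_disable() */
	} else {
		if (new_state == CLOCK_ENABLE)
			m->clk_running++;	/* clk_enable() */
	}
	m->state = new_state;
}

int main(void)
{
	struct model m = { CLOCK_DISABLE, true, 0 };

	set_state(&m, CLOCK_ENABLE);  assert(m.clk_running == 1); /* probe */
	set_state(&m, CLOCK_SUSPEND); assert(m.clk_running == 0); /* idle */
	set_state(&m, CLOCK_ENABLE);  assert(m.clk_running == 1); /* select_chip */
	set_state(&m, CLOCK_DISABLE); assert(m.clk_running == 0); /* suspend/remove */
	return 0;
}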
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 81bbb5ee148d..93b1f74321c2 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -867,7 +867,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
867 if (ret) 867 if (ret)
868 goto err; 868 goto err;
869 869
870 add_mtd_partitions(flctl_mtd, pdata->parts, pdata->nr_parts); 870 mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
871 871
872 return 0; 872 return 0;
873 873
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 54ec7542a7b7..19e24ed089ea 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,9 +103,7 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
103 return readb(sharpsl->io + ECCCNTR) != 0; 103 return readb(sharpsl->io + ECCCNTR) != 0;
104} 104}
105 105
106#ifdef CONFIG_MTD_PARTITIONS
107static const char *part_probes[] = { "cmdlinepart", NULL }; 106static const char *part_probes[] = { "cmdlinepart", NULL };
108#endif
109 107
110/* 108/*
111 * Main initialization routine 109 * Main initialization routine
@@ -113,10 +111,8 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
113static int __devinit sharpsl_nand_probe(struct platform_device *pdev) 111static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
114{ 112{
115 struct nand_chip *this; 113 struct nand_chip *this;
116#ifdef CONFIG_MTD_PARTITIONS
117 struct mtd_partition *sharpsl_partition_info; 114 struct mtd_partition *sharpsl_partition_info;
118 int nr_partitions; 115 int nr_partitions;
119#endif
120 struct resource *r; 116 struct resource *r;
121 int err = 0; 117 int err = 0;
122 struct sharpsl_nand *sharpsl; 118 struct sharpsl_nand *sharpsl;
@@ -188,18 +184,14 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
188 184
189 /* Register the partitions */ 185 /* Register the partitions */
190 sharpsl->mtd.name = "sharpsl-nand"; 186 sharpsl->mtd.name = "sharpsl-nand";
191#ifdef CONFIG_MTD_PARTITIONS
192 nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0); 187 nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
193 if (nr_partitions <= 0) { 188 if (nr_partitions <= 0) {
194 nr_partitions = data->nr_partitions; 189 nr_partitions = data->nr_partitions;
195 sharpsl_partition_info = data->partitions; 190 sharpsl_partition_info = data->partitions;
196 } 191 }
197 192
198 if (nr_partitions > 0) 193 err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info,
199 err = add_mtd_partitions(&sharpsl->mtd, sharpsl_partition_info, nr_partitions); 194 nr_partitions);
200 else
201#endif
202 err = add_mtd_device(&sharpsl->mtd);
203 if (err) 195 if (err)
204 goto err_add; 196 goto err_add;
205 197
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 57cc80cd01a3..b6332e83b289 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -139,7 +139,7 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
139 if (ret) 139 if (ret)
140 return ret; 140 return ret;
141 141
142 return add_mtd_device(mtd); 142 return mtd_device_register(mtd, NULL, 0);
143} 143}
144EXPORT_SYMBOL_GPL(sm_register_device); 144EXPORT_SYMBOL_GPL(sm_register_device);
145 145
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a853548986f0..ca2d0555729e 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -155,9 +155,7 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
155 return 1; 155 return 1;
156} 156}
157 157
158#ifdef CONFIG_MTD_PARTITIONS
159static const char *part_probes[] = { "cmdlinepart", NULL }; 158static const char *part_probes[] = { "cmdlinepart", NULL };
160#endif
161 159
162/* 160/*
163 * Probe for the NAND device. 161 * Probe for the NAND device.
@@ -168,11 +166,8 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
168 struct mtd_info *mtd; 166 struct mtd_info *mtd;
169 struct nand_chip *nand_chip; 167 struct nand_chip *nand_chip;
170 int res; 168 int res;
171
172#ifdef CONFIG_MTD_PARTITIONS
173 struct mtd_partition *partitions = NULL; 169 struct mtd_partition *partitions = NULL;
174 int num_partitions = 0; 170 int num_partitions = 0;
175#endif
176 171
177 /* Allocate memory for the device structure (and zero it) */ 172 /* Allocate memory for the device structure (and zero it) */
178 host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL); 173 host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
@@ -230,7 +225,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
230 goto out; 225 goto out;
231 } 226 }
232 227
233#ifdef CONFIG_MTD_PARTITIONS
234#ifdef CONFIG_MTD_CMDLINE_PARTS 228#ifdef CONFIG_MTD_CMDLINE_PARTS
235 num_partitions = parse_mtd_partitions(mtd, part_probes, 229 num_partitions = parse_mtd_partitions(mtd, part_probes,
236 &partitions, 0); 230 &partitions, 0);
@@ -240,7 +234,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
240 } 234 }
241#endif 235#endif
242 236
243#ifdef CONFIG_MTD_OF_PARTS
244 if (num_partitions == 0) { 237 if (num_partitions == 0) {
245 num_partitions = of_mtd_parse_partitions(&ofdev->dev, 238 num_partitions = of_mtd_parse_partitions(&ofdev->dev,
246 ofdev->dev.of_node, 239 ofdev->dev.of_node,
@@ -250,19 +243,12 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
250 goto release; 243 goto release;
251 } 244 }
252 } 245 }
253#endif
254 if (partitions && (num_partitions > 0))
255 res = add_mtd_partitions(mtd, partitions, num_partitions);
256 else
257#endif
258 res = add_mtd_device(mtd);
259 246
247 res = mtd_device_register(mtd, partitions, num_partitions);
260 if (!res) 248 if (!res)
261 return res; 249 return res;
262 250
263#ifdef CONFIG_MTD_PARTITIONS
264release: 251release:
265#endif
266 nand_release(mtd); 252 nand_release(mtd);
267 253
268out: 254out:
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
index 0cc6d0acb8fe..bef76cd7c24c 100644
--- a/drivers/mtd/nand/spia.c
+++ b/drivers/mtd/nand/spia.c
@@ -149,7 +149,7 @@ static int __init spia_init(void)
149 } 149 }
150 150
151 /* Register the partitions */ 151 /* Register the partitions */
152 add_mtd_partitions(spia_mtd, partition_info, NUM_PARTITIONS); 152 mtd_device_register(spia_mtd, partition_info, NUM_PARTITIONS);
153 153
154 /* Return happy */ 154 /* Return happy */
155 return 0; 155 return 0;
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index c004e474631b..11e8371b5683 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -381,10 +381,8 @@ static int tmio_probe(struct platform_device *dev)
381 struct tmio_nand *tmio; 381 struct tmio_nand *tmio;
382 struct mtd_info *mtd; 382 struct mtd_info *mtd;
383 struct nand_chip *nand_chip; 383 struct nand_chip *nand_chip;
384#ifdef CONFIG_MTD_PARTITIONS
385 struct mtd_partition *parts; 384 struct mtd_partition *parts;
386 int nbparts = 0; 385 int nbparts = 0;
387#endif
388 int retval; 386 int retval;
389 387
390 if (data == NULL) 388 if (data == NULL)
@@ -463,7 +461,6 @@ static int tmio_probe(struct platform_device *dev)
463 goto err_scan; 461 goto err_scan;
464 } 462 }
465 /* Register the partitions */ 463 /* Register the partitions */
466#ifdef CONFIG_MTD_PARTITIONS
467#ifdef CONFIG_MTD_CMDLINE_PARTS 464#ifdef CONFIG_MTD_CMDLINE_PARTS
468 nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0); 465 nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
469#endif 466#endif
@@ -472,12 +469,7 @@ static int tmio_probe(struct platform_device *dev)
472 nbparts = data->num_partitions; 469 nbparts = data->num_partitions;
473 } 470 }
474 471
475 if (nbparts) 472 retval = mtd_device_register(mtd, parts, nbparts);
476 retval = add_mtd_partitions(mtd, parts, nbparts);
477 else
478#endif
479 retval = add_mtd_device(mtd);
480
481 if (!retval) 473 if (!retval)
482 return retval; 474 return retval;
483 475
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index ca270a4881a4..bfba4e39a6c5 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -74,9 +74,7 @@ struct txx9ndfmc_drvdata {
74 unsigned char hold; /* in gbusclock */ 74 unsigned char hold; /* in gbusclock */
75 unsigned char spw; /* in gbusclock */ 75 unsigned char spw; /* in gbusclock */
76 struct nand_hw_control hw_control; 76 struct nand_hw_control hw_control;
77#ifdef CONFIG_MTD_PARTITIONS
78 struct mtd_partition *parts[MAX_TXX9NDFMC_DEV]; 77 struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
79#endif
80}; 78};
81 79
82static struct platform_device *mtd_to_platdev(struct mtd_info *mtd) 80static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -289,9 +287,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
289static int __init txx9ndfmc_probe(struct platform_device *dev) 287static int __init txx9ndfmc_probe(struct platform_device *dev)
290{ 288{
291 struct txx9ndfmc_platform_data *plat = dev->dev.platform_data; 289 struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
292#ifdef CONFIG_MTD_PARTITIONS
293 static const char *probes[] = { "cmdlinepart", NULL }; 290 static const char *probes[] = { "cmdlinepart", NULL };
294#endif
295 int hold, spw; 291 int hold, spw;
296 int i; 292 int i;
297 struct txx9ndfmc_drvdata *drvdata; 293 struct txx9ndfmc_drvdata *drvdata;
@@ -337,9 +333,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
337 struct txx9ndfmc_priv *txx9_priv; 333 struct txx9ndfmc_priv *txx9_priv;
338 struct nand_chip *chip; 334 struct nand_chip *chip;
339 struct mtd_info *mtd; 335 struct mtd_info *mtd;
340#ifdef CONFIG_MTD_PARTITIONS
341 int nr_parts; 336 int nr_parts;
342#endif
343 337
344 if (!(plat->ch_mask & (1 << i))) 338 if (!(plat->ch_mask & (1 << i)))
345 continue; 339 continue;
@@ -399,13 +393,9 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
399 } 393 }
400 mtd->name = txx9_priv->mtdname; 394 mtd->name = txx9_priv->mtdname;
401 395
402#ifdef CONFIG_MTD_PARTITIONS
403 nr_parts = parse_mtd_partitions(mtd, probes, 396 nr_parts = parse_mtd_partitions(mtd, probes,
404 &drvdata->parts[i], 0); 397 &drvdata->parts[i], 0);
405 if (nr_parts > 0) 398 mtd_device_register(mtd, drvdata->parts[i], nr_parts);
406 add_mtd_partitions(mtd, drvdata->parts[i], nr_parts);
407#endif
408 add_mtd_device(mtd);
409 drvdata->mtds[i] = mtd; 399 drvdata->mtds[i] = mtd;
410 } 400 }
411 401
@@ -431,9 +421,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
431 txx9_priv = chip->priv; 421 txx9_priv = chip->priv;
432 422
433 nand_release(mtd); 423 nand_release(mtd);
434#ifdef CONFIG_MTD_PARTITIONS
435 kfree(drvdata->parts[i]); 424 kfree(drvdata->parts[i]);
436#endif
437 kfree(txx9_priv->mtdname); 425 kfree(txx9_priv->mtdname);
438 kfree(txx9_priv); 426 kfree(txx9_priv);
439 } 427 }
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 4f426195f8db..772ad2966619 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -1,7 +1,6 @@
1menuconfig MTD_ONENAND 1menuconfig MTD_ONENAND
2 tristate "OneNAND Device Support" 2 tristate "OneNAND Device Support"
3 depends on MTD 3 depends on MTD
4 select MTD_PARTITIONS
5 help 4 help
6 This enables support for accessing all type of OneNAND flash 5 This enables support for accessing all type of OneNAND flash
7 devices. For further information see 6 devices. For further information see
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index ac08750748a3..2d70d354d846 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -30,9 +30,7 @@
30 */ 30 */
31#define DRIVER_NAME "onenand-flash" 31#define DRIVER_NAME "onenand-flash"
32 32
33#ifdef CONFIG_MTD_PARTITIONS
34static const char *part_probes[] = { "cmdlinepart", NULL, }; 33static const char *part_probes[] = { "cmdlinepart", NULL, };
35#endif
36 34
37struct onenand_info { 35struct onenand_info {
38 struct mtd_info mtd; 36 struct mtd_info mtd;
@@ -75,15 +73,13 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
75 goto out_iounmap; 73 goto out_iounmap;
76 } 74 }
77 75
78#ifdef CONFIG_MTD_PARTITIONS
79 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 76 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
80 if (err > 0) 77 if (err > 0)
81 add_mtd_partitions(&info->mtd, info->parts, err); 78 mtd_device_register(&info->mtd, info->parts, err);
82 else if (err <= 0 && pdata && pdata->parts) 79 else if (err <= 0 && pdata && pdata->parts)
83 add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts); 80 mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
84 else 81 else
85#endif 82 err = mtd_device_register(&info->mtd, NULL, 0);
86 err = add_mtd_device(&info->mtd);
87 83
88 platform_set_drvdata(pdev, info); 84 platform_set_drvdata(pdev, info);
89 85
@@ -108,11 +104,7 @@ static int __devexit generic_onenand_remove(struct platform_device *pdev)
108 platform_set_drvdata(pdev, NULL); 104 platform_set_drvdata(pdev, NULL);
109 105
110 if (info) { 106 if (info) {
111 if (info->parts) 107 mtd_device_unregister(&info->mtd);
112 del_mtd_partitions(&info->mtd);
113 else
114 del_mtd_device(&info->mtd);
115
116 onenand_release(&info->mtd); 108 onenand_release(&info->mtd);
117 release_mem_region(res->start, size); 109 release_mem_region(res->start, size);
118 iounmap(info->onenand.base); 110 iounmap(info->onenand.base);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 1fcb41adab07..a916dec29215 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -67,9 +67,7 @@ struct omap2_onenand {
67 struct regulator *regulator; 67 struct regulator *regulator;
68}; 68};
69 69
70#ifdef CONFIG_MTD_PARTITIONS
71static const char *part_probes[] = { "cmdlinepart", NULL, }; 70static const char *part_probes[] = { "cmdlinepart", NULL, };
72#endif
73 71
74static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data) 72static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
75{ 73{
@@ -755,15 +753,13 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
755 if ((r = onenand_scan(&c->mtd, 1)) < 0) 753 if ((r = onenand_scan(&c->mtd, 1)) < 0)
756 goto err_release_regulator; 754 goto err_release_regulator;
757 755
758#ifdef CONFIG_MTD_PARTITIONS
759 r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0); 756 r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
760 if (r > 0) 757 if (r > 0)
761 r = add_mtd_partitions(&c->mtd, c->parts, r); 758 r = mtd_device_register(&c->mtd, c->parts, r);
762 else if (pdata->parts != NULL) 759 else if (pdata->parts != NULL)
763 r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts); 760 r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
764 else 761 else
765#endif 762 r = mtd_device_register(&c->mtd, NULL, 0);
766 r = add_mtd_device(&c->mtd);
767 if (r) 763 if (r)
768 goto err_release_onenand; 764 goto err_release_onenand;
769 765
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 56a8b2005bda..ac9e959802a7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -65,11 +65,11 @@ MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
65 " : 2 -> 1st Block lock" 65 " : 2 -> 1st Block lock"
66 " : 3 -> BOTH OTP Block and 1st Block lock"); 66 " : 3 -> BOTH OTP Block and 1st Block lock");
67 67
68/** 68/*
69 * onenand_oob_128 - oob info for Flex-Onenand with 4KB page 69 * flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page
70 * For now, we expose only 64 out of 80 ecc bytes 70 * For now, we expose only 64 out of 80 ecc bytes
71 */ 71 */
72static struct nand_ecclayout onenand_oob_128 = { 72static struct nand_ecclayout flexonenand_oob_128 = {
73 .eccbytes = 64, 73 .eccbytes = 64,
74 .eccpos = { 74 .eccpos = {
75 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 75 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
@@ -86,6 +86,35 @@ static struct nand_ecclayout onenand_oob_128 = {
86 } 86 }
87}; 87};
88 88
89/*
90 * onenand_oob_128 - oob info for OneNAND with 4KB page
91 *
92 * Based on specification:
93 * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
94 *
95 * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout)
96 *
97 * oobfree uses the spare area fields marked as
98 * "Managed by internal ECC logic for Logical Sector Number area"
99 */
100static struct nand_ecclayout onenand_oob_128 = {
101 .eccbytes = 64,
102 .eccpos = {
103 7, 8, 9, 10, 11, 12, 13, 14, 15,
104 23, 24, 25, 26, 27, 28, 29, 30, 31,
105 39, 40, 41, 42, 43, 44, 45, 46, 47,
106 55, 56, 57, 58, 59, 60, 61, 62, 63,
107 71, 72, 73, 74, 75, 76, 77, 78, 79,
108 87, 88, 89, 90, 91, 92, 93, 94, 95,
109 103, 104, 105, 106, 107, 108, 109, 110, 111,
110 119
111 },
112 .oobfree = {
113 {2, 3}, {18, 3}, {34, 3}, {50, 3},
114 {66, 3}, {82, 3}, {98, 3}, {114, 3}
115 }
116};
117
89/** 118/**
90 * onenand_oob_64 - oob info for large (2KB) page 119 * onenand_oob_64 - oob info for large (2KB) page
91 */ 120 */
@@ -2424,7 +2453,7 @@ static int onenand_block_by_block_erase(struct mtd_info *mtd,
2424 len -= block_size; 2453 len -= block_size;
2425 addr += block_size; 2454 addr += block_size;
2426 2455
2427 if (addr == region_end) { 2456 if (region && addr == region_end) {
2428 if (!len) 2457 if (!len)
2429 break; 2458 break;
2430 region++; 2459 region++;
@@ -4018,8 +4047,13 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
4018 */ 4047 */
4019 switch (mtd->oobsize) { 4048 switch (mtd->oobsize) {
4020 case 128: 4049 case 128:
4021 this->ecclayout = &onenand_oob_128; 4050 if (FLEXONENAND(this)) {
4022 mtd->subpage_sft = 0; 4051 this->ecclayout = &flexonenand_oob_128;
4052 mtd->subpage_sft = 0;
4053 } else {
4054 this->ecclayout = &onenand_oob_128;
4055 mtd->subpage_sft = 2;
4056 }
4023 break; 4057 break;
4024 case 64: 4058 case 64:
4025 this->ecclayout = &onenand_oob_64; 4059 this->ecclayout = &onenand_oob_64;
@@ -4108,12 +4142,8 @@ void onenand_release(struct mtd_info *mtd)
4108{ 4142{
4109 struct onenand_chip *this = mtd->priv; 4143 struct onenand_chip *this = mtd->priv;
4110 4144
4111#ifdef CONFIG_MTD_PARTITIONS
4112 /* Deregister partitions */ 4145 /* Deregister partitions */
4113 del_mtd_partitions (mtd); 4146 mtd_device_unregister(mtd);
4114#endif
4115 /* Deregister the device */
4116 del_mtd_device (mtd);
4117 4147
4118 /* Free bad block table memory, if allocated */ 4148 /* Free bad block table memory, if allocated */
4119 if (this->bbm) { 4149 if (this->bbm) {
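onenand_base.c gains a second 128-byte OOB layout so that a non-Flex 4KiB-page OneNAND no longer reuses the Flex-OneNAND layout, and subpage writes are enabled (subpage_sft = 2) for that case. The new layout exposes 64 of the 72 ECC bytes and eight 3-byte free regions. A quick standalone check of those counts (plain C, not kernel code; the offsets and lengths are copied from the hunk above):

#include <stdio.h>

/* oobfree regions added for the new onenand_oob_128 layout. */
static const struct { int offset, length; } oobfree[] = {
	{2, 3}, {18, 3}, {34, 3}, {50, 3},
	{66, 3}, {82, 3}, {98, 3}, {114, 3},
};

int main(void)
{
	int i, free_bytes = 0;

	for (i = 0; i < 8; i++)
		free_bytes += oobfree[i].length;
	/* 64 ECC bytes plus 24 free bytes fit in the 128-byte spare area. */
	printf("ecc bytes: 64, free bytes: %d\n", free_bytes);
	return 0;
}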
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index 5ef3bd547772..85399e3accda 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -539,7 +539,8 @@ static int __init onenand_sim_init(void)
539 return -ENXIO; 539 return -ENXIO;
540 } 540 }
541 541
542 add_mtd_partitions(&info->mtd, info->parts, ARRAY_SIZE(os_partitions)); 542 mtd_device_register(&info->mtd, info->parts,
543 ARRAY_SIZE(os_partitions));
543 544
544 return 0; 545 return 0;
545} 546}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index a4c74a9ba430..3306b5b3c736 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -147,9 +147,7 @@ struct s3c_onenand {
147 struct resource *dma_res; 147 struct resource *dma_res;
148 unsigned long phys_base; 148 unsigned long phys_base;
149 struct completion complete; 149 struct completion complete;
150#ifdef CONFIG_MTD_PARTITIONS
151 struct mtd_partition *parts; 150 struct mtd_partition *parts;
152#endif
153}; 151};
154 152
155#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1))) 153#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
@@ -159,9 +157,7 @@ struct s3c_onenand {
159 157
160static struct s3c_onenand *onenand; 158static struct s3c_onenand *onenand;
161 159
162#ifdef CONFIG_MTD_PARTITIONS
163static const char *part_probes[] = { "cmdlinepart", NULL, }; 160static const char *part_probes[] = { "cmdlinepart", NULL, };
164#endif
165 161
166static inline int s3c_read_reg(int offset) 162static inline int s3c_read_reg(int offset)
167{ 163{
@@ -1021,15 +1017,13 @@ static int s3c_onenand_probe(struct platform_device *pdev)
1021 if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) 1017 if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
1022 dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); 1018 dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
1023 1019
1024#ifdef CONFIG_MTD_PARTITIONS
1025 err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0); 1020 err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
1026 if (err > 0) 1021 if (err > 0)
1027 add_mtd_partitions(mtd, onenand->parts, err); 1022 mtd_device_register(mtd, onenand->parts, err);
1028 else if (err <= 0 && pdata && pdata->parts) 1023 else if (err <= 0 && pdata && pdata->parts)
1029 add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 1024 mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
1030 else 1025 else
1031#endif 1026 err = mtd_device_register(mtd, NULL, 0);
1032 err = add_mtd_device(mtd);
1033 1027
1034 platform_set_drvdata(pdev, mtd); 1028 platform_set_drvdata(pdev, mtd);
1035 1029
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 9aa81584c8a2..941bc3c05d6e 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -365,7 +365,7 @@ static int gluebi_create(struct ubi_device_info *di,
365 vi->vol_id); 365 vi->vol_id);
366 mutex_unlock(&devices_mutex); 366 mutex_unlock(&devices_mutex);
367 367
368 if (add_mtd_device(mtd)) { 368 if (mtd_device_register(mtd, NULL, 0)) {
369 err_msg("cannot add MTD device"); 369 err_msg("cannot add MTD device");
370 kfree(mtd->name); 370 kfree(mtd->name);
371 kfree(gluebi); 371 kfree(gluebi);
@@ -407,7 +407,7 @@ static int gluebi_remove(struct ubi_volume_info *vi)
407 return err; 407 return err;
408 408
409 mtd = &gluebi->mtd; 409 mtd = &gluebi->mtd;
410 err = del_mtd_device(mtd); 410 err = mtd_device_unregister(mtd);
411 if (err) { 411 if (err) {
412 err_msg("cannot remove fake MTD device %d, UBI device %d, " 412 err_msg("cannot remove fake MTD device %d, UBI device %d, "
413 "volume %d, error %d", mtd->index, gluebi->ubi_num, 413 "volume %d, error %d", mtd->index, gluebi->ubi_num,
@@ -524,7 +524,7 @@ static void __exit ubi_gluebi_exit(void)
524 int err; 524 int err;
525 struct mtd_info *mtd = &gluebi->mtd; 525 struct mtd_info *mtd = &gluebi->mtd;
526 526
527 err = del_mtd_device(mtd); 527 err = mtd_device_unregister(mtd);
528 if (err) 528 if (err)
529 err_msg("error %d while removing gluebi MTD device %d, " 529 err_msg("error %d while removing gluebi MTD device %d, "
530 "UBI device %d, volume %d - ignoring", err, 530 "UBI device %d, volume %d - ignoring", err,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6141667c5fb7..17b4dd94da90 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -113,9 +113,11 @@ MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
113module_param(tx_queues, int, 0); 113module_param(tx_queues, int, 0);
114MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); 114MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
115module_param_named(num_grat_arp, num_peer_notif, int, 0644); 115module_param_named(num_grat_arp, num_peer_notif, int, 0644);
116MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on failover event (alias of num_unsol_na)"); 116MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
117 "failover event (alias of num_unsol_na)");
117module_param_named(num_unsol_na, num_peer_notif, int, 0644); 118module_param_named(num_unsol_na, num_peer_notif, int, 0644);
118MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on failover event (alias of num_grat_arp)"); 119MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
120 "failover event (alias of num_grat_arp)");
119module_param(miimon, int, 0); 121module_param(miimon, int, 0);
120MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); 122MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
121module_param(updelay, int, 0); 123module_param(updelay, int, 0);
@@ -127,7 +129,7 @@ module_param(use_carrier, int, 0);
127MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " 129MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
128 "0 for off, 1 for on (default)"); 130 "0 for off, 1 for on (default)");
129module_param(mode, charp, 0); 131module_param(mode, charp, 0);
130MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, " 132MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
131 "1 for active-backup, 2 for balance-xor, " 133 "1 for active-backup, 2 for balance-xor, "
132 "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, " 134 "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
133 "6 for balance-alb"); 135 "6 for balance-alb");
@@ -142,27 +144,35 @@ MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
142 "2 for only on active slave " 144 "2 for only on active slave "
143 "failure"); 145 "failure");
144module_param(lacp_rate, charp, 0); 146module_param(lacp_rate, charp, 0);
145MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner " 147MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
146 "(slow/fast)"); 148 "0 for slow, 1 for fast");
147module_param(ad_select, charp, 0); 149module_param(ad_select, charp, 0);
148MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic: stable (0, default), bandwidth (1), count (2)"); 150MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
151 "0 for stable (default), 1 for bandwidth, "
152 "2 for count");
149module_param(xmit_hash_policy, charp, 0); 153module_param(xmit_hash_policy, charp, 0);
150MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)" 154MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
151 ", 1 for layer 3+4"); 155 "0 for layer 2 (default), 1 for layer 3+4, "
156 "2 for layer 2+3");
152module_param(arp_interval, int, 0); 157module_param(arp_interval, int, 0);
153MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); 158MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
154module_param_array(arp_ip_target, charp, NULL, 0); 159module_param_array(arp_ip_target, charp, NULL, 0);
155MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); 160MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
156module_param(arp_validate, charp, 0); 161module_param(arp_validate, charp, 0);
157MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); 162MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
163 "0 for none (default), 1 for active, "
164 "2 for backup, 3 for all");
158module_param(fail_over_mac, charp, 0); 165module_param(fail_over_mac, charp, 0);
159MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow"); 166MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
167 "the same MAC; 0 for none (default), "
168 "1 for active, 2 for follow");
160module_param(all_slaves_active, int, 0); 169module_param(all_slaves_active, int, 0);
161MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" 170MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
162 "by setting active flag for all slaves. " 171 "by setting active flag for all slaves; "
163 "0 for never (default), 1 for always."); 172 "0 for never (default), 1 for always.");
164module_param(resend_igmp, int, 0); 173module_param(resend_igmp, int, 0);
165MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link failure"); 174MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
175 "link failure");
166 176
167/*----------------------------- Global variables ----------------------------*/ 177/*----------------------------- Global variables ----------------------------*/
168 178
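The bonding hunks only rewrap the MODULE_PARM_DESC() strings and make the value lists read consistently ("0 for X, 1 for Y"); there is no functional change. The wrapping relies on C adjacent-string-literal concatenation, so a trailing space has to be kept at each split point or the words run together. A minimal example of the idiom (parameter name and description text are illustrative, not from bonding):

#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_interval;
module_param(example_interval, int, 0644);
/* Adjacent literals concatenate into one description string; note the
 * trailing space before each split so the joined words stay separated. */
MODULE_PARM_DESC(example_interval,
		 "Example polling interval in milliseconds; "
		 "0 disables polling (default), any positive "
		 "value enables it");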
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 807b6bb200eb..29a4f06fbfcf 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1772,7 +1772,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1772 /* obtain emac clock from kernel */ 1772 /* obtain emac clock from kernel */
1773 emac_clk = clk_get(&pdev->dev, NULL); 1773 emac_clk = clk_get(&pdev->dev, NULL);
1774 if (IS_ERR(emac_clk)) { 1774 if (IS_ERR(emac_clk)) {
1775 printk(KERN_ERR "DaVinci EMAC: Failed to get EMAC clock\n"); 1775 dev_err(&pdev->dev, "failed to get EMAC clock\n");
1776 return -EBUSY; 1776 return -EBUSY;
1777 } 1777 }
1778 emac_bus_frequency = clk_get_rate(emac_clk); 1778 emac_bus_frequency = clk_get_rate(emac_clk);
@@ -1780,7 +1780,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1780 1780
1781 ndev = alloc_etherdev(sizeof(struct emac_priv)); 1781 ndev = alloc_etherdev(sizeof(struct emac_priv));
1782 if (!ndev) { 1782 if (!ndev) {
1783 printk(KERN_ERR "DaVinci EMAC: Error allocating net_device\n"); 1783 dev_err(&pdev->dev, "error allocating net_device\n");
1784 clk_put(emac_clk); 1784 clk_put(emac_clk);
1785 return -ENOMEM; 1785 return -ENOMEM;
1786 } 1786 }
@@ -1795,7 +1795,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1795 1795
1796 pdata = pdev->dev.platform_data; 1796 pdata = pdev->dev.platform_data;
1797 if (!pdata) { 1797 if (!pdata) {
1798 printk(KERN_ERR "DaVinci EMAC: No platform data\n"); 1798 dev_err(&pdev->dev, "no platform data\n");
1799 return -ENODEV; 1799 return -ENODEV;
1800 } 1800 }
1801 1801
@@ -1814,7 +1814,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1814 /* Get EMAC platform data */ 1814 /* Get EMAC platform data */
1815 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1815 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1816 if (!res) { 1816 if (!res) {
1817 dev_err(emac_dev, "DaVinci EMAC: Error getting res\n"); 1817 dev_err(&pdev->dev,"error getting res\n");
1818 rc = -ENOENT; 1818 rc = -ENOENT;
1819 goto probe_quit; 1819 goto probe_quit;
1820 } 1820 }
@@ -1822,14 +1822,14 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1822 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; 1822 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
1823 size = res->end - res->start + 1; 1823 size = res->end - res->start + 1;
1824 if (!request_mem_region(res->start, size, ndev->name)) { 1824 if (!request_mem_region(res->start, size, ndev->name)) {
1825 dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n"); 1825 dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
1826 rc = -ENXIO; 1826 rc = -ENXIO;
1827 goto probe_quit; 1827 goto probe_quit;
1828 } 1828 }
1829 1829
1830 priv->remap_addr = ioremap(res->start, size); 1830 priv->remap_addr = ioremap(res->start, size);
1831 if (!priv->remap_addr) { 1831 if (!priv->remap_addr) {
1832 dev_err(emac_dev, "Unable to map IO\n"); 1832 dev_err(&pdev->dev, "unable to map IO\n");
1833 rc = -ENOMEM; 1833 rc = -ENOMEM;
1834 release_mem_region(res->start, size); 1834 release_mem_region(res->start, size);
1835 goto probe_quit; 1835 goto probe_quit;
@@ -1863,7 +1863,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1863 1863
1864 priv->dma = cpdma_ctlr_create(&dma_params); 1864 priv->dma = cpdma_ctlr_create(&dma_params);
1865 if (!priv->dma) { 1865 if (!priv->dma) {
1866 dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n"); 1866 dev_err(&pdev->dev, "error initializing DMA\n");
1867 rc = -ENOMEM; 1867 rc = -ENOMEM;
1868 goto no_dma; 1868 goto no_dma;
1869 } 1869 }
@@ -1879,7 +1879,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1879 1879
1880 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1880 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1881 if (!res) { 1881 if (!res) {
1882 dev_err(emac_dev, "DaVinci EMAC: Error getting irq res\n"); 1882 dev_err(&pdev->dev, "error getting irq res\n");
1883 rc = -ENOENT; 1883 rc = -ENOENT;
1884 goto no_irq_res; 1884 goto no_irq_res;
1885 } 1885 }
@@ -1888,8 +1888,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1888 if (!is_valid_ether_addr(priv->mac_addr)) { 1888 if (!is_valid_ether_addr(priv->mac_addr)) {
1889 /* Use random MAC if none passed */ 1889 /* Use random MAC if none passed */
1890 random_ether_addr(priv->mac_addr); 1890 random_ether_addr(priv->mac_addr);
1891 printk(KERN_WARNING "%s: using random MAC addr: %pM\n", 1891 dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
1892 __func__, priv->mac_addr); 1892 priv->mac_addr);
1893 } 1893 }
1894 1894
1895 ndev->netdev_ops = &emac_netdev_ops; 1895 ndev->netdev_ops = &emac_netdev_ops;
@@ -1902,7 +1902,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1902 SET_NETDEV_DEV(ndev, &pdev->dev); 1902 SET_NETDEV_DEV(ndev, &pdev->dev);
1903 rc = register_netdev(ndev); 1903 rc = register_netdev(ndev);
1904 if (rc) { 1904 if (rc) {
1905 dev_err(emac_dev, "DaVinci EMAC: Error in register_netdev\n"); 1905 dev_err(&pdev->dev, "error in register_netdev\n");
1906 rc = -ENODEV; 1906 rc = -ENODEV;
1907 goto netdev_reg_err; 1907 goto netdev_reg_err;
1908 } 1908 }
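The davinci_emac changes swap raw printk(KERN_ERR ...) calls for dev_err()/dev_warn() against &pdev->dev, which prefixes each message with the driver and device name automatically and makes the hand-written "DaVinci EMAC:" tags redundant. A minimal probe-path sketch of the convention (the function and message text are illustrative, not taken from this driver):

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		/* dev_err() already prints "<driver> <device>: ", so the
		 * message itself stays short and lower-case. */
		dev_err(&pdev->dev, "error getting memory resource\n");
		return -ENOENT;
	}
	dev_info(&pdev->dev, "registers at %pR\n", res);
	return 0;
}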
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index a3c0dc9d8b98..9537aaa50c2f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -69,7 +69,7 @@ static const char paranoia_str[] = KERN_ERR
69 69
70static const char bc_drvname[] = "baycom_epp"; 70static const char bc_drvname[] = "baycom_epp";
71static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n" 71static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n"
72"baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n"; 72"baycom_epp: version 0.7\n";
73 73
74/* --------------------------------------------------------------------- */ 74/* --------------------------------------------------------------------- */
75 75
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 5f5af9a606f8..279d2296290a 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -102,7 +102,7 @@
102 102
103static const char bc_drvname[] = "baycom_par"; 103static const char bc_drvname[] = "baycom_par";
104static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" 104static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
105"baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n"; 105"baycom_par: version 0.9\n";
106 106
107/* --------------------------------------------------------------------- */ 107/* --------------------------------------------------------------------- */
108 108
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 3e25f10cabd6..99cdce33df8b 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -92,7 +92,7 @@
92 92
93static const char bc_drvname[] = "baycom_ser_fdx"; 93static const char bc_drvname[] = "baycom_ser_fdx";
94static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" 94static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
95"baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; 95"baycom_ser_fdx: version 0.10\n";
96 96
97/* --------------------------------------------------------------------- */ 97/* --------------------------------------------------------------------- */
98 98
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index 1686f6dcbbce..d92fe6ca788f 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -80,7 +80,7 @@
80 80
81static const char bc_drvname[] = "baycom_ser_hdx"; 81static const char bc_drvname[] = "baycom_ser_hdx";
82static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" 82static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
83"baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; 83"baycom_ser_hdx: version 0.10\n";
84 84
85/* --------------------------------------------------------------------- */ 85/* --------------------------------------------------------------------- */
86 86
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 5b37579e84b7..a4a3516b6bbf 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -749,7 +749,7 @@ EXPORT_SYMBOL(hdlcdrv_unregister);
749static int __init hdlcdrv_init_driver(void) 749static int __init hdlcdrv_init_driver(void)
750{ 750{
751 printk(KERN_INFO "hdlcdrv: (C) 1996-2000 Thomas Sailer HB9JNX/AE4WA\n"); 751 printk(KERN_INFO "hdlcdrv: (C) 1996-2000 Thomas Sailer HB9JNX/AE4WA\n");
752 printk(KERN_INFO "hdlcdrv: version 0.8 compiled " __TIME__ " " __DATE__ "\n"); 752 printk(KERN_INFO "hdlcdrv: version 0.8\n");
753 return 0; 753 return 0;
754} 754}
755 755
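The baycom_* and hdlcdrv hunks (and the pc300 and parport_ip32 ones further down) all drop __DATE__/__TIME__ from version banners: embedding the build timestamp makes otherwise identical builds differ and adds nothing beyond the version string. A trimmed-down banner in the same style (module name, author and version are illustrative):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Version banner without __DATE__/__TIME__, keeping builds reproducible. */
static const char example_drvinfo[] =
	KERN_INFO "example_drv: (C) 2011 Example Author\n"
	"example_drv: version 0.1\n";

static int __init example_init(void)
{
	printk(example_drvinfo);
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");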
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index e646bfce2d84..b6304486f244 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -216,7 +216,7 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
216 int rc; 216 int rc;
217 217
218 for (;;) { 218 for (;;) {
219 rc = del_mtd_device(&part->mtd); 219 rc = mtd_device_unregister(&part->mtd);
220 if (rc != -EBUSY) 220 if (rc != -EBUSY)
221 break; 221 break;
222 ssleep(1); 222 ssleep(1);
@@ -268,7 +268,7 @@ static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
268 part->mtd.write = efx_mtd->ops->write; 268 part->mtd.write = efx_mtd->ops->write;
269 part->mtd.sync = efx_mtd_sync; 269 part->mtd.sync = efx_mtd_sync;
270 270
271 if (add_mtd_device(&part->mtd)) 271 if (mtd_device_register(&part->mtd, NULL, 0))
272 goto fail; 272 goto fail;
273 } 273 }
274 274
@@ -280,7 +280,7 @@ fail:
280 --part; 280 --part;
281 efx_mtd_remove_partition(part); 281 efx_mtd_remove_partition(part);
282 } 282 }
283 /* add_mtd_device() returns 1 if the MTD table is full */ 283 /* mtd_device_register() returns 1 if the MTD table is full */
284 return -ENOMEM; 284 return -ENOMEM;
285} 285}
286 286
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 737b59f1a8dc..9617d3d0ee39 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -3242,8 +3242,7 @@ static inline void show_version(void)
3242 rcsdate++; 3242 rcsdate++;
3243 tmp = strrchr(rcsdate, ' '); 3243 tmp = strrchr(rcsdate, ' ');
3244 *tmp = '\0'; 3244 *tmp = '\0';
3245 printk(KERN_INFO "Cyclades-PC300 driver %s %s (built %s %s)\n", 3245 printk(KERN_INFO "Cyclades-PC300 driver %s %s\n", rcsvers, rcsdate);
3246 rcsvers, rcsdate, __DATE__, __TIME__);
3247} /* show_version */ 3246} /* show_version */
3248 3247
3249static const struct net_device_ops cpc_netdev_ops = { 3248static const struct net_device_ops cpc_netdev_ops = {
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 8b63a691a9ed..65200af29c52 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -670,7 +670,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
670 670
671 pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 671 pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
672 672
673 if (depth != 1 || 673 if (depth != 1 || !data ||
674 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 674 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
675 return 0; 675 return 0;
676 676
@@ -679,16 +679,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
679 /* Retrieve command line */ 679 /* Retrieve command line */
680 p = of_get_flat_dt_prop(node, "bootargs", &l); 680 p = of_get_flat_dt_prop(node, "bootargs", &l);
681 if (p != NULL && l > 0) 681 if (p != NULL && l > 0)
682 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); 682 strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
683 683
684#ifdef CONFIG_CMDLINE 684#ifdef CONFIG_CMDLINE
685#ifndef CONFIG_CMDLINE_FORCE 685#ifndef CONFIG_CMDLINE_FORCE
686 if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) 686 if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
687#endif 687#endif
688 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 688 strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
689#endif /* CONFIG_CMDLINE */ 689#endif /* CONFIG_CMDLINE */
690 690
691 pr_debug("Command line is: %s\n", cmd_line); 691 pr_debug("Command line is: %s\n", (char*)data);
692 692
693 /* break now */ 693 /* break now */
694 return 1; 694 return 1;
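The of/fdt change makes early_init_dt_scan_chosen() write the command line into the buffer passed through the scan callback's data pointer instead of the global cmd_line, and bail out early when no buffer is supplied. Callers are then expected to hand in their own buffer, roughly as sketched below; this is a hedged example only, the exact caller and buffer vary per architecture, and boot_command_line is assumed to be the usual kernel command-line array:

#include <linux/init.h>
#include <linux/of_fdt.h>

static void __init example_setup_machine_fdt(void)
{
	/* The chosen-node scanner now fills the buffer passed via *data. */
	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
}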
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index d3d7809af8bf..0dc34f12f92e 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -2203,7 +2203,6 @@ static __exit void parport_ip32_unregister_port(struct parport *p)
2203static int __init parport_ip32_init(void) 2203static int __init parport_ip32_init(void)
2204{ 2204{
2205 pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n"); 2205 pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
2206 pr_debug1(PPIP32 "Compiled on %s, %s\n", __DATE__, __TIME__);
2207 this_port = parport_ip32_probe_port(); 2206 this_port = parport_ip32_probe_port();
2208 return IS_ERR(this_port) ? PTR_ERR(this_port) : 0; 2207 return IS_ERR(this_port) ? PTR_ERR(this_port) : 0;
2209} 2208}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 52a462fc6b84..e57b50b38565 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -68,6 +68,13 @@ config BATTERY_DS2760
68 help 68 help
69 Say Y here to enable support for batteries with ds2760 chip. 69 Say Y here to enable support for batteries with ds2760 chip.
70 70
71config BATTERY_DS2780
72 tristate "DS2780 battery driver"
73 select W1
74 select W1_SLAVE_DS2780
75 help
76 Say Y here to enable support for batteries with ds2780 chip.
77
71config BATTERY_DS2782 78config BATTERY_DS2782
72 tristate "DS2782/DS2786 standalone gas-gauge" 79 tristate "DS2782/DS2786 standalone gas-gauge"
73 depends on I2C 80 depends on I2C
@@ -203,6 +210,15 @@ config CHARGER_ISP1704
203 Say Y to enable support for USB Charger Detection with 210 Say Y to enable support for USB Charger Detection with
204 ISP1707/ISP1704 USB transceivers. 211 ISP1707/ISP1704 USB transceivers.
205 212
213config CHARGER_MAX8903
214 tristate "MAX8903 Battery DC-DC Charger for USB and Adapter Power"
215 depends on GENERIC_HARDIRQS
216 help
 217 Say Y to enable support for the MAX8903 DC-DC charger, exposed
 218 through the power supply sysfs interface. The driver controls the
 219 charger-enable and current-limit pins from interrupt handlers,
 220 based on the state of the charger connections.
221
206config CHARGER_TWL4030 222config CHARGER_TWL4030
207 tristate "OMAP TWL4030 BCI charger driver" 223 tristate "OMAP TWL4030 BCI charger driver"
208 depends on TWL4030_CORE 224 depends on TWL4030_CORE
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 8385bfae8728..009a90fa8ac9 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
15obj-$(CONFIG_TEST_POWER) += test_power.o 15obj-$(CONFIG_TEST_POWER) += test_power.o
16 16
17obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o 17obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
18obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
18obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o 19obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
19obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o 20obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
20obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o 21obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
@@ -32,5 +33,6 @@ obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
32obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o 33obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
33obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o 34obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
34obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o 35obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
36obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
35obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o 37obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
36obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o 38obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 59e68dbd028b..bb16f5b7e167 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it> 4 * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
5 * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it> 5 * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
6 * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de> 6 * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
7 * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
7 * 8 *
8 * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. 9 * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
9 * 10 *
@@ -76,7 +77,7 @@ struct bq27x00_reg_cache {
76 int time_to_empty_avg; 77 int time_to_empty_avg;
77 int time_to_full; 78 int time_to_full;
78 int charge_full; 79 int charge_full;
79 int charge_counter; 80 int cycle_count;
80 int capacity; 81 int capacity;
81 int flags; 82 int flags;
82 83
@@ -115,7 +116,7 @@ static enum power_supply_property bq27x00_battery_props[] = {
115 POWER_SUPPLY_PROP_CHARGE_FULL, 116 POWER_SUPPLY_PROP_CHARGE_FULL,
116 POWER_SUPPLY_PROP_CHARGE_NOW, 117 POWER_SUPPLY_PROP_CHARGE_NOW,
117 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 118 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
118 POWER_SUPPLY_PROP_CHARGE_COUNTER, 119 POWER_SUPPLY_PROP_CYCLE_COUNT,
119 POWER_SUPPLY_PROP_ENERGY_NOW, 120 POWER_SUPPLY_PROP_ENERGY_NOW,
120}; 121};
121 122
@@ -267,7 +268,7 @@ static void bq27x00_update(struct bq27x00_device_info *di)
267 cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP); 268 cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
268 cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF); 269 cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
269 cache.charge_full = bq27x00_battery_read_lmd(di); 270 cache.charge_full = bq27x00_battery_read_lmd(di);
270 cache.charge_counter = bq27x00_battery_read_cyct(di); 271 cache.cycle_count = bq27x00_battery_read_cyct(di);
271 272
272 if (!is_bq27500) 273 if (!is_bq27500)
273 cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false); 274 cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
@@ -496,8 +497,8 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
496 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: 497 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
497 ret = bq27x00_simple_value(di->charge_design_full, val); 498 ret = bq27x00_simple_value(di->charge_design_full, val);
498 break; 499 break;
499 case POWER_SUPPLY_PROP_CHARGE_COUNTER: 500 case POWER_SUPPLY_PROP_CYCLE_COUNT:
500 ret = bq27x00_simple_value(di->cache.charge_counter, val); 501 ret = bq27x00_simple_value(di->cache.cycle_count, val);
501 break; 502 break;
502 case POWER_SUPPLY_PROP_ENERGY_NOW: 503 case POWER_SUPPLY_PROP_ENERGY_NOW:
503 ret = bq27x00_battery_energy(di, val); 504 ret = bq27x00_battery_energy(di, val);
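Renaming the property from CHARGE_COUNTER to CYCLE_COUNT also changes the sysfs attribute the power_supply core creates for it (cycle_count rather than charge_counter). A user-space sketch for reading it; the supply name below is only an example and depends on the board:

#include <stdio.h>

int main(void)
{
	/* Example path; the actual supply name varies by platform. */
	const char *path = "/sys/class/power_supply/bq27200-0/cycle_count";
	FILE *f = fopen(path, "r");
	int cycles;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &cycles) == 1)
		printf("battery cycle count: %d\n", cycles);
	fclose(f);
	return 0;
}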
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index e534290f3256..f2c9cc33c0f9 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -86,7 +86,11 @@ static int rated_capacities[] = {
86 920, /* NEC */ 86 920, /* NEC */
87 1440, /* Samsung */ 87 1440, /* Samsung */
88 1440, /* BYD */ 88 1440, /* BYD */
89#ifdef CONFIG_MACH_H4700
90 1800, /* HP iPAQ hx4700 3.7V 1800mAh (359113-001) */
91#else
89 1440, /* Lishen */ 92 1440, /* Lishen */
93#endif
90 1440, /* NEC */ 94 1440, /* NEC */
91 2880, /* Samsung */ 95 2880, /* Samsung */
92 2880, /* BYD */ 96 2880, /* BYD */
@@ -186,7 +190,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
186 190
187 scale[0] = di->full_active_uAh; 191 scale[0] = di->full_active_uAh;
188 for (i = 1; i < 5; i++) 192 for (i = 1; i < 5; i++)
189 scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 2 + i]; 193 scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 1 + i];
190 194
191 di->full_active_uAh = battery_interpolate(scale, di->temp_C / 10); 195 di->full_active_uAh = battery_interpolate(scale, di->temp_C / 10);
192 di->full_active_uAh *= 1000; /* convert to µAh */ 196 di->full_active_uAh *= 1000; /* convert to µAh */
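The ds2760 change above fixes an off-by-one in the indices used to build the five-entry interpolation table for the temperature-compensated full capacity: the deltas now come from raw[DS2760_ACTIVE_FULL + 2] through raw[DS2760_ACTIVE_FULL + 5]. A small self-contained illustration of the corrected accumulation (the offset value and byte contents are made up for the example):

#include <stdio.h>

#define ACTIVE_FULL 0x16            /* hypothetical register offset */

int main(void)
{
	/* pretend register dump; only the delta bytes after the
	 * 16-bit ACTIVE_FULL value matter for the scale table */
	unsigned char raw[64] = { 0 };
	int scale[5];
	int i;

	raw[ACTIVE_FULL + 2] = 10;
	raw[ACTIVE_FULL + 3] = 20;
	raw[ACTIVE_FULL + 4] = 30;
	raw[ACTIVE_FULL + 5] = 40;

	scale[0] = 1000;            /* base full_active_uAh (example) */
	for (i = 1; i < 5; i++)
		scale[i] = scale[i - 1] + raw[ACTIVE_FULL + 1 + i];

	for (i = 0; i < 5; i++)
		printf("scale[%d] = %d\n", i, scale[i]);
	return 0;
}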
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c
new file mode 100644
index 000000000000..1fefe82e12e3
--- /dev/null
+++ b/drivers/power/ds2780_battery.c
@@ -0,0 +1,853 @@
1/*
2 * 1-wire client/driver for the Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC
3 *
4 * Copyright (C) 2010 Indesign, LLC
5 *
6 * Author: Clifton Barnes <cabarnes@indesign-llc.com>
7 *
8 * Based on ds2760_battery and ds2782_battery drivers
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/param.h>
19#include <linux/pm.h>
20#include <linux/platform_device.h>
21#include <linux/power_supply.h>
22#include <linux/idr.h>
23
24#include "../w1/w1.h"
25#include "../w1/slaves/w1_ds2780.h"
26
27/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
28#define DS2780_CURRENT_UNITS 1563
29/* Charge unit measurement in uAh for a 1 milli-ohm sense resistor */
30#define DS2780_CHARGE_UNITS 6250
31/* Number of bytes in user EEPROM space */
32#define DS2780_USER_EEPROM_SIZE (DS2780_EEPROM_BLOCK0_END - \
33 DS2780_EEPROM_BLOCK0_START + 1)
34/* Number of bytes in parameter EEPROM space */
35#define DS2780_PARAM_EEPROM_SIZE (DS2780_EEPROM_BLOCK1_END - \
36 DS2780_EEPROM_BLOCK1_START + 1)
37
38struct ds2780_device_info {
39 struct device *dev;
40 struct power_supply bat;
41 struct device *w1_dev;
42};
43
44enum current_types {
45 CURRENT_NOW,
46 CURRENT_AVG,
47};
48
49static const char model[] = "DS2780";
50static const char manufacturer[] = "Maxim/Dallas";
51
52static inline struct ds2780_device_info *to_ds2780_device_info(
53 struct power_supply *psy)
54{
55 return container_of(psy, struct ds2780_device_info, bat);
56}
57
58static inline struct power_supply *to_power_supply(struct device *dev)
59{
60 return dev_get_drvdata(dev);
61}
62
63static inline int ds2780_read8(struct device *dev, u8 *val, int addr)
64{
65 return w1_ds2780_io(dev, val, addr, sizeof(u8), 0);
66}
67
68static int ds2780_read16(struct device *dev, s16 *val, int addr)
69{
70 int ret;
71 u8 raw[2];
72
73 ret = w1_ds2780_io(dev, raw, addr, sizeof(u8) * 2, 0);
74 if (ret < 0)
75 return ret;
76
77 *val = (raw[0] << 8) | raw[1];
78
79 return 0;
80}
81
82static inline int ds2780_read_block(struct device *dev, u8 *val, int addr,
83 size_t count)
84{
85 return w1_ds2780_io(dev, val, addr, count, 0);
86}
87
88static inline int ds2780_write(struct device *dev, u8 *val, int addr,
89 size_t count)
90{
91 return w1_ds2780_io(dev, val, addr, count, 1);
92}
93
94static inline int ds2780_store_eeprom(struct device *dev, int addr)
95{
96 return w1_ds2780_eeprom_cmd(dev, addr, W1_DS2780_COPY_DATA);
97}
98
99static inline int ds2780_recall_eeprom(struct device *dev, int addr)
100{
101 return w1_ds2780_eeprom_cmd(dev, addr, W1_DS2780_RECALL_DATA);
102}
103
104static int ds2780_save_eeprom(struct ds2780_device_info *dev_info, int reg)
105{
106 int ret;
107
108 ret = ds2780_store_eeprom(dev_info->w1_dev, reg);
109 if (ret < 0)
110 return ret;
111
112 ret = ds2780_recall_eeprom(dev_info->w1_dev, reg);
113 if (ret < 0)
114 return ret;
115
116 return 0;
117}
118
119/* Set sense resistor value in mhos */
120static int ds2780_set_sense_register(struct ds2780_device_info *dev_info,
121 u8 conductance)
122{
123 int ret;
124
125 ret = ds2780_write(dev_info->w1_dev, &conductance,
126 DS2780_RSNSP_REG, sizeof(u8));
127 if (ret < 0)
128 return ret;
129
130 return ds2780_save_eeprom(dev_info, DS2780_RSNSP_REG);
131}
132
133/* Get RSGAIN value from 0 to 1.999 in steps of 0.001 */
134static int ds2780_get_rsgain_register(struct ds2780_device_info *dev_info,
135 u16 *rsgain)
136{
137 return ds2780_read16(dev_info->w1_dev, rsgain, DS2780_RSGAIN_MSB_REG);
138}
139
140/* Set RSGAIN value from 0 to 1.999 in steps of 0.001 */
141static int ds2780_set_rsgain_register(struct ds2780_device_info *dev_info,
142 u16 rsgain)
143{
144 int ret;
145 u8 raw[] = {rsgain >> 8, rsgain & 0xFF};
146
147 ret = ds2780_write(dev_info->w1_dev, raw,
148 DS2780_RSGAIN_MSB_REG, sizeof(u8) * 2);
149 if (ret < 0)
150 return ret;
151
152 return ds2780_save_eeprom(dev_info, DS2780_RSGAIN_MSB_REG);
153}
154
155static int ds2780_get_voltage(struct ds2780_device_info *dev_info,
156 int *voltage_uV)
157{
158 int ret;
159 s16 voltage_raw;
160
161 /*
162 * The voltage value is located in 10 bits across the voltage MSB
 163 * and LSB registers in two's complement form
164 * Sign bit of the voltage value is in bit 7 of the voltage MSB register
165 * Bits 9 - 3 of the voltage value are in bits 6 - 0 of the
166 * voltage MSB register
167 * Bits 2 - 0 of the voltage value are in bits 7 - 5 of the
168 * voltage LSB register
169 */
170 ret = ds2780_read16(dev_info->w1_dev, &voltage_raw,
171 DS2780_VOLT_MSB_REG);
172 if (ret < 0)
173 return ret;
174
175 /*
176 * DS2780 reports voltage in units of 4.88mV, but the battery class
177 * reports in units of uV, so convert by multiplying by 4880.
178 */
179 *voltage_uV = (voltage_raw / 32) * 4880;
180 return 0;
181}
182
183static int ds2780_get_temperature(struct ds2780_device_info *dev_info,
184 int *temperature)
185{
186 int ret;
187 s16 temperature_raw;
188
189 /*
190 * The temperature value is located in 10 bits across the temperature
 191 * MSB and LSB registers in two's complement form
192 * Sign bit of the temperature value is in bit 7 of the temperature
193 * MSB register
194 * Bits 9 - 3 of the temperature value are in bits 6 - 0 of the
195 * temperature MSB register
196 * Bits 2 - 0 of the temperature value are in bits 7 - 5 of the
197 * temperature LSB register
198 */
199 ret = ds2780_read16(dev_info->w1_dev, &temperature_raw,
200 DS2780_TEMP_MSB_REG);
201 if (ret < 0)
202 return ret;
203
204 /*
 205 * Temperature is measured in units of 0.125 degrees celsius, the
206 * power_supply class measures temperature in tenths of degrees
207 * celsius. The temperature value is stored as a 10 bit number, plus
208 * sign in the upper bits of a 16 bit register.
209 */
210 *temperature = ((temperature_raw / 32) * 125) / 100;
211 return 0;
212}
213
214static int ds2780_get_current(struct ds2780_device_info *dev_info,
215 enum current_types type, int *current_uA)
216{
217 int ret, sense_res;
218 s16 current_raw;
219 u8 sense_res_raw, reg_msb;
220
221 /*
222 * The units of measurement for current are dependent on the value of
223 * the sense resistor.
224 */
225 ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
226 if (ret < 0)
227 return ret;
228
229 if (sense_res_raw == 0) {
230 dev_err(dev_info->dev, "sense resistor value is 0\n");
231 return -ENXIO;
232 }
233 sense_res = 1000 / sense_res_raw;
234
235 if (type == CURRENT_NOW)
236 reg_msb = DS2780_CURRENT_MSB_REG;
237 else if (type == CURRENT_AVG)
238 reg_msb = DS2780_IAVG_MSB_REG;
239 else
240 return -EINVAL;
241
242 /*
243 * The current value is located in 16 bits across the current MSB
 244 * and LSB registers in two's complement form
245 * Sign bit of the current value is in bit 7 of the current MSB register
246 * Bits 14 - 8 of the current value are in bits 6 - 0 of the current
247 * MSB register
248 * Bits 7 - 0 of the current value are in bits 7 - 0 of the current
249 * LSB register
250 */
251 ret = ds2780_read16(dev_info->w1_dev, &current_raw, reg_msb);
252 if (ret < 0)
253 return ret;
254
255 *current_uA = current_raw * (DS2780_CURRENT_UNITS / sense_res);
256 return 0;
257}
258
259static int ds2780_get_accumulated_current(struct ds2780_device_info *dev_info,
260 int *accumulated_current)
261{
262 int ret, sense_res;
263 s16 current_raw;
264 u8 sense_res_raw;
265
266 /*
267 * The units of measurement for accumulated current are dependent on
268 * the value of the sense resistor.
269 */
270 ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
271 if (ret < 0)
272 return ret;
273
274 if (sense_res_raw == 0) {
275 dev_err(dev_info->dev, "sense resistor value is 0\n");
276 return -ENXIO;
277 }
278 sense_res = 1000 / sense_res_raw;
279
280 /*
281 * The ACR value is located in 16 bits across the ACR MSB and
282 * LSB registers
283 * Bits 15 - 8 of the ACR value are in bits 7 - 0 of the ACR
284 * MSB register
285 * Bits 7 - 0 of the ACR value are in bits 7 - 0 of the ACR
286 * LSB register
287 */
288 ret = ds2780_read16(dev_info->w1_dev, &current_raw, DS2780_ACR_MSB_REG);
289 if (ret < 0)
290 return ret;
291
292 *accumulated_current = current_raw * (DS2780_CHARGE_UNITS / sense_res);
293 return 0;
294}
295
296static int ds2780_get_capacity(struct ds2780_device_info *dev_info,
297 int *capacity)
298{
299 int ret;
300 u8 raw;
301
302 ret = ds2780_read8(dev_info->w1_dev, &raw, DS2780_RARC_REG);
303 if (ret < 0)
304 return ret;
305
306 *capacity = raw;
307 return raw;
308}
309
310static int ds2780_get_status(struct ds2780_device_info *dev_info, int *status)
311{
312 int ret, current_uA, capacity;
313
314 ret = ds2780_get_current(dev_info, CURRENT_NOW, &current_uA);
315 if (ret < 0)
316 return ret;
317
318 ret = ds2780_get_capacity(dev_info, &capacity);
319 if (ret < 0)
320 return ret;
321
322 if (capacity == 100)
323 *status = POWER_SUPPLY_STATUS_FULL;
324 else if (current_uA == 0)
325 *status = POWER_SUPPLY_STATUS_NOT_CHARGING;
326 else if (current_uA < 0)
327 *status = POWER_SUPPLY_STATUS_DISCHARGING;
328 else
329 *status = POWER_SUPPLY_STATUS_CHARGING;
330
331 return 0;
332}
333
334static int ds2780_get_charge_now(struct ds2780_device_info *dev_info,
335 int *charge_now)
336{
337 int ret;
338 u16 charge_raw;
339
340 /*
341 * The RAAC value is located in 16 bits across the RAAC MSB and
342 * LSB registers
343 * Bits 15 - 8 of the RAAC value are in bits 7 - 0 of the RAAC
344 * MSB register
345 * Bits 7 - 0 of the RAAC value are in bits 7 - 0 of the RAAC
346 * LSB register
347 */
348 ret = ds2780_read16(dev_info->w1_dev, &charge_raw, DS2780_RAAC_MSB_REG);
349 if (ret < 0)
350 return ret;
351
352 *charge_now = charge_raw * 1600;
353 return 0;
354}
355
356static int ds2780_get_control_register(struct ds2780_device_info *dev_info,
357 u8 *control_reg)
358{
359 return ds2780_read8(dev_info->w1_dev, control_reg, DS2780_CONTROL_REG);
360}
361
362static int ds2780_set_control_register(struct ds2780_device_info *dev_info,
363 u8 control_reg)
364{
365 int ret;
366
367 ret = ds2780_write(dev_info->w1_dev, &control_reg,
368 DS2780_CONTROL_REG, sizeof(u8));
369 if (ret < 0)
370 return ret;
371
372 return ds2780_save_eeprom(dev_info, DS2780_CONTROL_REG);
373}
374
375static int ds2780_battery_get_property(struct power_supply *psy,
376 enum power_supply_property psp,
377 union power_supply_propval *val)
378{
379 int ret = 0;
380 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
381
382 switch (psp) {
383 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
384 ret = ds2780_get_voltage(dev_info, &val->intval);
385 break;
386
387 case POWER_SUPPLY_PROP_TEMP:
388 ret = ds2780_get_temperature(dev_info, &val->intval);
389 break;
390
391 case POWER_SUPPLY_PROP_MODEL_NAME:
392 val->strval = model;
393 break;
394
395 case POWER_SUPPLY_PROP_MANUFACTURER:
396 val->strval = manufacturer;
397 break;
398
399 case POWER_SUPPLY_PROP_CURRENT_NOW:
400 ret = ds2780_get_current(dev_info, CURRENT_NOW, &val->intval);
401 break;
402
403 case POWER_SUPPLY_PROP_CURRENT_AVG:
404 ret = ds2780_get_current(dev_info, CURRENT_AVG, &val->intval);
405 break;
406
407 case POWER_SUPPLY_PROP_STATUS:
408 ret = ds2780_get_status(dev_info, &val->intval);
409 break;
410
411 case POWER_SUPPLY_PROP_CAPACITY:
412 ret = ds2780_get_capacity(dev_info, &val->intval);
413 break;
414
415 case POWER_SUPPLY_PROP_CHARGE_COUNTER:
416 ret = ds2780_get_accumulated_current(dev_info, &val->intval);
417 break;
418
419 case POWER_SUPPLY_PROP_CHARGE_NOW:
420 ret = ds2780_get_charge_now(dev_info, &val->intval);
421 break;
422
423 default:
424 ret = -EINVAL;
425 }
426
427 return ret;
428}
429
430static enum power_supply_property ds2780_battery_props[] = {
431 POWER_SUPPLY_PROP_STATUS,
432 POWER_SUPPLY_PROP_VOLTAGE_NOW,
433 POWER_SUPPLY_PROP_TEMP,
434 POWER_SUPPLY_PROP_MODEL_NAME,
435 POWER_SUPPLY_PROP_MANUFACTURER,
436 POWER_SUPPLY_PROP_CURRENT_NOW,
437 POWER_SUPPLY_PROP_CURRENT_AVG,
438 POWER_SUPPLY_PROP_CAPACITY,
439 POWER_SUPPLY_PROP_CHARGE_COUNTER,
440 POWER_SUPPLY_PROP_CHARGE_NOW,
441};
442
443static ssize_t ds2780_get_pmod_enabled(struct device *dev,
444 struct device_attribute *attr,
445 char *buf)
446{
447 int ret;
448 u8 control_reg;
449 struct power_supply *psy = to_power_supply(dev);
450 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
451
452 /* Get power mode */
453 ret = ds2780_get_control_register(dev_info, &control_reg);
454 if (ret < 0)
455 return ret;
456
457 return sprintf(buf, "%d\n",
458 !!(control_reg & DS2780_CONTROL_REG_PMOD));
459}
460
461static ssize_t ds2780_set_pmod_enabled(struct device *dev,
462 struct device_attribute *attr,
463 const char *buf,
464 size_t count)
465{
466 int ret;
467 u8 control_reg, new_setting;
468 struct power_supply *psy = to_power_supply(dev);
469 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
470
471 /* Set power mode */
472 ret = ds2780_get_control_register(dev_info, &control_reg);
473 if (ret < 0)
474 return ret;
475
476 ret = kstrtou8(buf, 0, &new_setting);
477 if (ret < 0)
478 return ret;
479
480 if ((new_setting != 0) && (new_setting != 1)) {
481 dev_err(dev_info->dev, "Invalid pmod setting (0 or 1)\n");
482 return -EINVAL;
483 }
484
485 if (new_setting)
486 control_reg |= DS2780_CONTROL_REG_PMOD;
487 else
488 control_reg &= ~DS2780_CONTROL_REG_PMOD;
489
490 ret = ds2780_set_control_register(dev_info, control_reg);
491 if (ret < 0)
492 return ret;
493
494 return count;
495}
496
497static ssize_t ds2780_get_sense_resistor_value(struct device *dev,
498 struct device_attribute *attr,
499 char *buf)
500{
501 int ret;
502 u8 sense_resistor;
503 struct power_supply *psy = to_power_supply(dev);
504 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
505
506 ret = ds2780_read8(dev_info->w1_dev, &sense_resistor, DS2780_RSNSP_REG);
507 if (ret < 0)
508 return ret;
509
510 ret = sprintf(buf, "%d\n", sense_resistor);
511 return ret;
512}
513
514static ssize_t ds2780_set_sense_resistor_value(struct device *dev,
515 struct device_attribute *attr,
516 const char *buf,
517 size_t count)
518{
519 int ret;
520 u8 new_setting;
521 struct power_supply *psy = to_power_supply(dev);
522 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
523
524 ret = kstrtou8(buf, 0, &new_setting);
525 if (ret < 0)
526 return ret;
527
528 ret = ds2780_set_sense_register(dev_info, new_setting);
529 if (ret < 0)
530 return ret;
531
532 return count;
533}
534
535static ssize_t ds2780_get_rsgain_setting(struct device *dev,
536 struct device_attribute *attr,
537 char *buf)
538{
539 int ret;
540 u16 rsgain;
541 struct power_supply *psy = to_power_supply(dev);
542 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
543
544 ret = ds2780_get_rsgain_register(dev_info, &rsgain);
545 if (ret < 0)
546 return ret;
547
548 return sprintf(buf, "%d\n", rsgain);
549}
550
551static ssize_t ds2780_set_rsgain_setting(struct device *dev,
552 struct device_attribute *attr,
553 const char *buf,
554 size_t count)
555{
556 int ret;
557 u16 new_setting;
558 struct power_supply *psy = to_power_supply(dev);
559 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
560
561 ret = kstrtou16(buf, 0, &new_setting);
562 if (ret < 0)
563 return ret;
564
565 /* Gain can only be from 0 to 1.999 in steps of .001 */
566 if (new_setting > 1999) {
567 dev_err(dev_info->dev, "Invalid rsgain setting (0 - 1999)\n");
568 return -EINVAL;
569 }
570
571 ret = ds2780_set_rsgain_register(dev_info, new_setting);
572 if (ret < 0)
573 return ret;
574
575 return count;
576}
577
578static ssize_t ds2780_get_pio_pin(struct device *dev,
579 struct device_attribute *attr,
580 char *buf)
581{
582 int ret;
583 u8 sfr;
584 struct power_supply *psy = to_power_supply(dev);
585 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
586
587 ret = ds2780_read8(dev_info->w1_dev, &sfr, DS2780_SFR_REG);
588 if (ret < 0)
589 return ret;
590
591 ret = sprintf(buf, "%d\n", sfr & DS2780_SFR_REG_PIOSC);
592 return ret;
593}
594
595static ssize_t ds2780_set_pio_pin(struct device *dev,
596 struct device_attribute *attr,
597 const char *buf,
598 size_t count)
599{
600 int ret;
601 u8 new_setting;
602 struct power_supply *psy = to_power_supply(dev);
603 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
604
605 ret = kstrtou8(buf, 0, &new_setting);
606 if (ret < 0)
607 return ret;
608
609 if ((new_setting != 0) && (new_setting != 1)) {
610 dev_err(dev_info->dev, "Invalid pio_pin setting (0 or 1)\n");
611 return -EINVAL;
612 }
613
614 ret = ds2780_write(dev_info->w1_dev, &new_setting,
615 DS2780_SFR_REG, sizeof(u8));
616 if (ret < 0)
617 return ret;
618
619 return count;
620}
621
622static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
623 struct kobject *kobj,
624 struct bin_attribute *bin_attr,
625 char *buf, loff_t off, size_t count)
626{
627 struct device *dev = container_of(kobj, struct device, kobj);
628 struct power_supply *psy = to_power_supply(dev);
629 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
630
631 count = min_t(loff_t, count,
632 DS2780_EEPROM_BLOCK1_END -
633 DS2780_EEPROM_BLOCK1_START + 1 - off);
634
635 return ds2780_read_block(dev_info->w1_dev, buf,
636 DS2780_EEPROM_BLOCK1_START + off, count);
637}
638
639static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
640 struct kobject *kobj,
641 struct bin_attribute *bin_attr,
642 char *buf, loff_t off, size_t count)
643{
644 struct device *dev = container_of(kobj, struct device, kobj);
645 struct power_supply *psy = to_power_supply(dev);
646 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
647 int ret;
648
649 count = min_t(loff_t, count,
650 DS2780_EEPROM_BLOCK1_END -
651 DS2780_EEPROM_BLOCK1_START + 1 - off);
652
653 ret = ds2780_write(dev_info->w1_dev, buf,
654 DS2780_EEPROM_BLOCK1_START + off, count);
655 if (ret < 0)
656 return ret;
657
658 ret = ds2780_save_eeprom(dev_info, DS2780_EEPROM_BLOCK1_START);
659 if (ret < 0)
660 return ret;
661
662 return count;
663}
664
665static struct bin_attribute ds2780_param_eeprom_bin_attr = {
666 .attr = {
667 .name = "param_eeprom",
668 .mode = S_IRUGO | S_IWUSR,
669 },
670 .size = DS2780_EEPROM_BLOCK1_END - DS2780_EEPROM_BLOCK1_START + 1,
671 .read = ds2780_read_param_eeprom_bin,
672 .write = ds2780_write_param_eeprom_bin,
673};
674
675static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
676 struct kobject *kobj,
677 struct bin_attribute *bin_attr,
678 char *buf, loff_t off, size_t count)
679{
680 struct device *dev = container_of(kobj, struct device, kobj);
681 struct power_supply *psy = to_power_supply(dev);
682 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
683
684 count = min_t(loff_t, count,
685 DS2780_EEPROM_BLOCK0_END -
686 DS2780_EEPROM_BLOCK0_START + 1 - off);
687
688 return ds2780_read_block(dev_info->w1_dev, buf,
689 DS2780_EEPROM_BLOCK0_START + off, count);
690
691}
692
693static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
694 struct kobject *kobj,
695 struct bin_attribute *bin_attr,
696 char *buf, loff_t off, size_t count)
697{
698 struct device *dev = container_of(kobj, struct device, kobj);
699 struct power_supply *psy = to_power_supply(dev);
700 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
701 int ret;
702
703 count = min_t(loff_t, count,
704 DS2780_EEPROM_BLOCK0_END -
705 DS2780_EEPROM_BLOCK0_START + 1 - off);
706
707 ret = ds2780_write(dev_info->w1_dev, buf,
708 DS2780_EEPROM_BLOCK0_START + off, count);
709 if (ret < 0)
710 return ret;
711
712 ret = ds2780_save_eeprom(dev_info, DS2780_EEPROM_BLOCK0_START);
713 if (ret < 0)
714 return ret;
715
716 return count;
717}
718
719static struct bin_attribute ds2780_user_eeprom_bin_attr = {
720 .attr = {
721 .name = "user_eeprom",
722 .mode = S_IRUGO | S_IWUSR,
723 },
724 .size = DS2780_EEPROM_BLOCK0_END - DS2780_EEPROM_BLOCK0_START + 1,
725 .read = ds2780_read_user_eeprom_bin,
726 .write = ds2780_write_user_eeprom_bin,
727};
728
729static DEVICE_ATTR(pmod_enabled, S_IRUGO | S_IWUSR, ds2780_get_pmod_enabled,
730 ds2780_set_pmod_enabled);
731static DEVICE_ATTR(sense_resistor_value, S_IRUGO | S_IWUSR,
732 ds2780_get_sense_resistor_value, ds2780_set_sense_resistor_value);
733static DEVICE_ATTR(rsgain_setting, S_IRUGO | S_IWUSR, ds2780_get_rsgain_setting,
734 ds2780_set_rsgain_setting);
735static DEVICE_ATTR(pio_pin, S_IRUGO | S_IWUSR, ds2780_get_pio_pin,
736 ds2780_set_pio_pin);
737
738
739static struct attribute *ds2780_attributes[] = {
740 &dev_attr_pmod_enabled.attr,
741 &dev_attr_sense_resistor_value.attr,
742 &dev_attr_rsgain_setting.attr,
743 &dev_attr_pio_pin.attr,
744 NULL
745};
746
747static const struct attribute_group ds2780_attr_group = {
748 .attrs = ds2780_attributes,
749};
750
751static int __devinit ds2780_battery_probe(struct platform_device *pdev)
752{
753 int ret = 0;
754 struct ds2780_device_info *dev_info;
755
756 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
757 if (!dev_info) {
758 ret = -ENOMEM;
759 goto fail;
760 }
761
762 platform_set_drvdata(pdev, dev_info);
763
764 dev_info->dev = &pdev->dev;
765 dev_info->w1_dev = pdev->dev.parent;
766 dev_info->bat.name = dev_name(&pdev->dev);
767 dev_info->bat.type = POWER_SUPPLY_TYPE_BATTERY;
768 dev_info->bat.properties = ds2780_battery_props;
769 dev_info->bat.num_properties = ARRAY_SIZE(ds2780_battery_props);
770 dev_info->bat.get_property = ds2780_battery_get_property;
771
772 ret = power_supply_register(&pdev->dev, &dev_info->bat);
773 if (ret) {
774 dev_err(dev_info->dev, "failed to register battery\n");
775 goto fail_free_info;
776 }
777
778 ret = sysfs_create_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
779 if (ret) {
780 dev_err(dev_info->dev, "failed to create sysfs group\n");
781 goto fail_unregister;
782 }
783
784 ret = sysfs_create_bin_file(&dev_info->bat.dev->kobj,
785 &ds2780_param_eeprom_bin_attr);
786 if (ret) {
787 dev_err(dev_info->dev,
788 "failed to create param eeprom bin file");
789 goto fail_remove_group;
790 }
791
792 ret = sysfs_create_bin_file(&dev_info->bat.dev->kobj,
793 &ds2780_user_eeprom_bin_attr);
794 if (ret) {
795 dev_err(dev_info->dev,
796 "failed to create user eeprom bin file");
797 goto fail_remove_bin_file;
798 }
799
800 return 0;
801
802fail_remove_bin_file:
803 sysfs_remove_bin_file(&dev_info->bat.dev->kobj,
804 &ds2780_param_eeprom_bin_attr);
805fail_remove_group:
806 sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
807fail_unregister:
808 power_supply_unregister(&dev_info->bat);
809fail_free_info:
810 kfree(dev_info);
811fail:
812 return ret;
813}
814
815static int __devexit ds2780_battery_remove(struct platform_device *pdev)
816{
817 struct ds2780_device_info *dev_info = platform_get_drvdata(pdev);
818
819 /* remove attributes */
820 sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
821
822 power_supply_unregister(&dev_info->bat);
823
824 kfree(dev_info);
825 return 0;
826}
827
828MODULE_ALIAS("platform:ds2780-battery");
829
830static struct platform_driver ds2780_battery_driver = {
831 .driver = {
832 .name = "ds2780-battery",
833 },
834 .probe = ds2780_battery_probe,
835 .remove = ds2780_battery_remove,
836};
837
838static int __init ds2780_battery_init(void)
839{
840 return platform_driver_register(&ds2780_battery_driver);
841}
842
843static void __exit ds2780_battery_exit(void)
844{
845 platform_driver_unregister(&ds2780_battery_driver);
846}
847
848module_init(ds2780_battery_init);
849module_exit(ds2780_battery_exit);
850
851MODULE_LICENSE("GPL");
852MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
 853MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC driver");
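The register comments in the new driver describe 10-bit values stored left-justified in a 16-bit MSB/LSB pair, which is why the conversions first divide by 32 (a five-bit shift) and then scale. A standalone illustration of the voltage and temperature arithmetic used above, with made-up raw register values:

#include <stdio.h>

int main(void)
{
	/* 10-bit value occupies the top bits of the combined MSB/LSB read */
	short voltage_raw = 0x6400;      /* example register contents */
	short temperature_raw = 0x1900;  /* example register contents */

	/* DS2780 voltage LSB is 4.88 mV -> report in uV */
	int voltage_uV = (voltage_raw / 32) * 4880;

	/* temperature LSB is 0.125 C -> report in tenths of a degree */
	int temp_tenths = ((temperature_raw / 32) * 125) / 100;

	printf("voltage: %d uV\n", voltage_uV);      /* 3904000 */
	printf("temperature: %d.%d C\n",
	       temp_tenths / 10, temp_tenths % 10);  /* 25.0 */
	return 0;
}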
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 25b88ac1d44c..718f2c537827 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -161,12 +161,27 @@ static int __devexit gpio_charger_remove(struct platform_device *pdev)
161 return 0; 161 return 0;
162} 162}
163 163
164#ifdef CONFIG_PM_SLEEP
165static int gpio_charger_resume(struct device *dev)
166{
167 struct platform_device *pdev = to_platform_device(dev);
168 struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
169
170 power_supply_changed(&gpio_charger->charger);
171
172 return 0;
173}
174#endif
175
176static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops, NULL, gpio_charger_resume);
177
164static struct platform_driver gpio_charger_driver = { 178static struct platform_driver gpio_charger_driver = {
165 .probe = gpio_charger_probe, 179 .probe = gpio_charger_probe,
166 .remove = __devexit_p(gpio_charger_remove), 180 .remove = __devexit_p(gpio_charger_remove),
167 .driver = { 181 .driver = {
168 .name = "gpio-charger", 182 .name = "gpio-charger",
169 .owner = THIS_MODULE, 183 .owner = THIS_MODULE,
184 .pm = &gpio_charger_pm_ops,
170 }, 185 },
171}; 186};
172 187
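SIMPLE_DEV_PM_OPS() fills a dev_pm_ops with the two given callbacks for all of the system-sleep transitions and drops them when CONFIG_PM_SLEEP is disabled, which is why only the resume handler above needs the #ifdef. Roughly what the macro amounts to in this case, written out by hand (a sketch, not a verbatim expansion):

/* Hand-written near-equivalent of
 * SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops, NULL, gpio_charger_resume) */
static const struct dev_pm_ops gpio_charger_pm_ops_open_coded = {
	.suspend  = NULL,
	.resume   = gpio_charger_resume,
	.freeze   = NULL,
	.thaw     = gpio_charger_resume,
	.poweroff = NULL,
	.restore  = gpio_charger_resume,
};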
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 2ad9b14a5ce3..f6d72b402a8e 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -33,6 +33,7 @@
33#include <linux/usb/ulpi.h> 33#include <linux/usb/ulpi.h>
34#include <linux/usb/ch9.h> 34#include <linux/usb/ch9.h>
35#include <linux/usb/gadget.h> 35#include <linux/usb/gadget.h>
36#include <linux/power/isp1704_charger.h>
36 37
37/* Vendor specific Power Control register */ 38/* Vendor specific Power Control register */
38#define ISP1704_PWR_CTRL 0x3d 39#define ISP1704_PWR_CTRL 0x3d
@@ -71,6 +72,18 @@ struct isp1704_charger {
71}; 72};
72 73
73/* 74/*
75 * Disable/enable the power from the isp1704 if a function for it
76 * has been provided with platform data.
77 */
78static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on)
79{
80 struct isp1704_charger_data *board = isp->dev->platform_data;
81
82 if (board->set_power)
83 board->set_power(on);
84}
85
86/*
74 * Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB 87 * Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB
75 * chargers). 88 * chargers).
76 * 89 *
@@ -222,6 +235,9 @@ static void isp1704_charger_work(struct work_struct *data)
222 235
223 mutex_lock(&lock); 236 mutex_lock(&lock);
224 237
238 if (event != USB_EVENT_NONE)
239 isp1704_charger_set_power(isp, 1);
240
225 switch (event) { 241 switch (event) {
226 case USB_EVENT_VBUS: 242 case USB_EVENT_VBUS:
227 isp->online = true; 243 isp->online = true;
@@ -269,6 +285,8 @@ static void isp1704_charger_work(struct work_struct *data)
269 */ 285 */
270 if (isp->otg->gadget) 286 if (isp->otg->gadget)
271 usb_gadget_disconnect(isp->otg->gadget); 287 usb_gadget_disconnect(isp->otg->gadget);
288
289 isp1704_charger_set_power(isp, 0);
272 break; 290 break;
273 case USB_EVENT_ENUMERATED: 291 case USB_EVENT_ENUMERATED:
274 if (isp->present) 292 if (isp->present)
@@ -394,6 +412,8 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
394 isp->dev = &pdev->dev; 412 isp->dev = &pdev->dev;
395 platform_set_drvdata(pdev, isp); 413 platform_set_drvdata(pdev, isp);
396 414
415 isp1704_charger_set_power(isp, 1);
416
397 ret = isp1704_test_ulpi(isp); 417 ret = isp1704_test_ulpi(isp);
398 if (ret < 0) 418 if (ret < 0)
399 goto fail1; 419 goto fail1;
@@ -434,6 +454,7 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
434 454
435 /* Detect charger if VBUS is valid (the cable was already plugged). */ 455 /* Detect charger if VBUS is valid (the cable was already plugged). */
436 ret = otg_io_read(isp->otg, ULPI_USB_INT_STS); 456 ret = otg_io_read(isp->otg, ULPI_USB_INT_STS);
457 isp1704_charger_set_power(isp, 0);
437 if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) { 458 if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) {
438 isp->event = USB_EVENT_VBUS; 459 isp->event = USB_EVENT_VBUS;
439 schedule_work(&isp->work); 460 schedule_work(&isp->work);
@@ -459,6 +480,7 @@ static int __devexit isp1704_charger_remove(struct platform_device *pdev)
459 otg_unregister_notifier(isp->otg, &isp->nb); 480 otg_unregister_notifier(isp->otg, &isp->nb);
460 power_supply_unregister(&isp->psy); 481 power_supply_unregister(&isp->psy);
461 otg_put_transceiver(isp->otg); 482 otg_put_transceiver(isp->otg);
483 isp1704_charger_set_power(isp, 0);
462 kfree(isp); 484 kfree(isp);
463 485
464 return 0; 486 return 0;
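The new isp1704_charger_set_power() helper expects board code to provide a set_power() hook through the freshly included <linux/power/isp1704_charger.h>. A hedged board-file sketch of how that might be wired up; the platform-data layout is inferred from the usage above, and the GPIO number is a placeholder:

/* Board-code sketch (assumes <linux/gpio.h>; the platform-data
 * struct is assumed to carry a single set_power callback, as
 * implied by isp1704_charger_set_power() above). */
#define EXAMPLE_ISP1704_POWER_GPIO 92   /* placeholder */

static void board_isp1704_set_power(bool on)
{
	gpio_set_value(EXAMPLE_ISP1704_POWER_GPIO, on);
}

static struct isp1704_charger_data board_isp1704_data = {
	.set_power = board_isp1704_set_power,
};

static struct platform_device board_isp1704_device = {
	.name = "isp1704_charger",   /* must match the platform driver name */
	.dev  = {
		.platform_data = &board_isp1704_data,
	},
};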
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
new file mode 100644
index 000000000000..33ff0e37809e
--- /dev/null
+++ b/drivers/power/max8903_charger.c
@@ -0,0 +1,391 @@
1/*
2 * max8903_charger.c - Maxim 8903 USB/Adapter Charger Driver
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/gpio.h>
24#include <linux/interrupt.h>
25#include <linux/slab.h>
26#include <linux/power_supply.h>
27#include <linux/platform_device.h>
28#include <linux/power/max8903_charger.h>
29
30struct max8903_data {
31 struct max8903_pdata *pdata;
32 struct device *dev;
33 struct power_supply psy;
34 bool fault;
35 bool usb_in;
36 bool ta_in;
37};
38
39static enum power_supply_property max8903_charger_props[] = {
40 POWER_SUPPLY_PROP_STATUS, /* Charger status output */
41 POWER_SUPPLY_PROP_ONLINE, /* External power source */
42 POWER_SUPPLY_PROP_HEALTH, /* Fault or OK */
43};
44
45static int max8903_get_property(struct power_supply *psy,
46 enum power_supply_property psp,
47 union power_supply_propval *val)
48{
49 struct max8903_data *data = container_of(psy,
50 struct max8903_data, psy);
51
52 switch (psp) {
53 case POWER_SUPPLY_PROP_STATUS:
54 val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
55 if (data->pdata->chg) {
56 if (gpio_get_value(data->pdata->chg) == 0)
57 val->intval = POWER_SUPPLY_STATUS_CHARGING;
58 else if (data->usb_in || data->ta_in)
59 val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
60 else
61 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
62 }
63 break;
64 case POWER_SUPPLY_PROP_ONLINE:
65 val->intval = 0;
66 if (data->usb_in || data->ta_in)
67 val->intval = 1;
68 break;
69 case POWER_SUPPLY_PROP_HEALTH:
70 val->intval = POWER_SUPPLY_HEALTH_GOOD;
71 if (data->fault)
72 val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
73 break;
74 default:
75 return -EINVAL;
76 }
77 return 0;
78}
79
80static irqreturn_t max8903_dcin(int irq, void *_data)
81{
82 struct max8903_data *data = _data;
83 struct max8903_pdata *pdata = data->pdata;
84 bool ta_in;
85 enum power_supply_type old_type;
86
87 ta_in = gpio_get_value(pdata->dok) ? false : true;
88
89 if (ta_in == data->ta_in)
90 return IRQ_HANDLED;
91
92 data->ta_in = ta_in;
93
94 /* Set Current-Limit-Mode 1:DC 0:USB */
95 if (pdata->dcm)
96 gpio_set_value(pdata->dcm, ta_in ? 1 : 0);
97
98 /* Charger Enable / Disable (cen is negated) */
99 if (pdata->cen)
100 gpio_set_value(pdata->cen, ta_in ? 0 :
101 (data->usb_in ? 0 : 1));
102
103 dev_dbg(data->dev, "TA(DC-IN) Charger %s.\n", ta_in ?
104 "Connected" : "Disconnected");
105
106 old_type = data->psy.type;
107
108 if (data->ta_in)
109 data->psy.type = POWER_SUPPLY_TYPE_MAINS;
110 else if (data->usb_in)
111 data->psy.type = POWER_SUPPLY_TYPE_USB;
112 else
113 data->psy.type = POWER_SUPPLY_TYPE_BATTERY;
114
115 if (old_type != data->psy.type)
116 power_supply_changed(&data->psy);
117
118 return IRQ_HANDLED;
119}
120
121static irqreturn_t max8903_usbin(int irq, void *_data)
122{
123 struct max8903_data *data = _data;
124 struct max8903_pdata *pdata = data->pdata;
125 bool usb_in;
126 enum power_supply_type old_type;
127
128 usb_in = gpio_get_value(pdata->uok) ? false : true;
129
130 if (usb_in == data->usb_in)
131 return IRQ_HANDLED;
132
133 data->usb_in = usb_in;
134
135 /* Do not touch Current-Limit-Mode */
136
137 /* Charger Enable / Disable (cen is negated) */
138 if (pdata->cen)
139 gpio_set_value(pdata->cen, usb_in ? 0 :
140 (data->ta_in ? 0 : 1));
141
142 dev_dbg(data->dev, "USB Charger %s.\n", usb_in ?
143 "Connected" : "Disconnected");
144
145 old_type = data->psy.type;
146
147 if (data->ta_in)
148 data->psy.type = POWER_SUPPLY_TYPE_MAINS;
149 else if (data->usb_in)
150 data->psy.type = POWER_SUPPLY_TYPE_USB;
151 else
152 data->psy.type = POWER_SUPPLY_TYPE_BATTERY;
153
154 if (old_type != data->psy.type)
155 power_supply_changed(&data->psy);
156
157 return IRQ_HANDLED;
158}
159
160static irqreturn_t max8903_fault(int irq, void *_data)
161{
162 struct max8903_data *data = _data;
163 struct max8903_pdata *pdata = data->pdata;
164 bool fault;
165
166 fault = gpio_get_value(pdata->flt) ? false : true;
167
168 if (fault == data->fault)
169 return IRQ_HANDLED;
170
171 data->fault = fault;
172
173 if (fault)
174 dev_err(data->dev, "Charger suffers a fault and stops.\n");
175 else
176 dev_err(data->dev, "Charger recovered from a fault.\n");
177
178 return IRQ_HANDLED;
179}
180
181static __devinit int max8903_probe(struct platform_device *pdev)
182{
183 struct max8903_data *data;
184 struct device *dev = &pdev->dev;
185 struct max8903_pdata *pdata = pdev->dev.platform_data;
186 int ret = 0;
187 int gpio;
188 int ta_in = 0;
189 int usb_in = 0;
190
191 data = kzalloc(sizeof(struct max8903_data), GFP_KERNEL);
192 if (data == NULL) {
193 dev_err(dev, "Cannot allocate memory.\n");
194 return -ENOMEM;
195 }
196 data->pdata = pdata;
197 data->dev = dev;
198 platform_set_drvdata(pdev, data);
199
200 if (pdata->dc_valid == false && pdata->usb_valid == false) {
201 dev_err(dev, "No valid power sources.\n");
202 ret = -EINVAL;
203 goto err;
204 }
205
206 if (pdata->dc_valid) {
207 if (pdata->dok && gpio_is_valid(pdata->dok) &&
208 pdata->dcm && gpio_is_valid(pdata->dcm)) {
209 gpio = pdata->dok; /* PULL_UPed Interrupt */
210 ta_in = gpio_get_value(gpio) ? 0 : 1;
211
212 gpio = pdata->dcm; /* Output */
213 gpio_set_value(gpio, ta_in);
214 } else {
215 dev_err(dev, "When DC is wired, DOK and DCM should"
216 " be wired as well.\n");
217 ret = -EINVAL;
218 goto err;
219 }
220 } else {
221 if (pdata->dcm) {
222 if (gpio_is_valid(pdata->dcm))
223 gpio_set_value(pdata->dcm, 0);
224 else {
225 dev_err(dev, "Invalid pin: dcm.\n");
226 ret = -EINVAL;
227 goto err;
228 }
229 }
230 }
231
232 if (pdata->usb_valid) {
233 if (pdata->uok && gpio_is_valid(pdata->uok)) {
234 gpio = pdata->uok;
235 usb_in = gpio_get_value(gpio) ? 0 : 1;
236 } else {
237 dev_err(dev, "When USB is wired, UOK should be wired."
238 "as well.\n");
239 ret = -EINVAL;
240 goto err;
241 }
242 }
243
244 if (pdata->cen) {
245 if (gpio_is_valid(pdata->cen)) {
246 gpio_set_value(pdata->cen, (ta_in || usb_in) ? 0 : 1);
247 } else {
248 dev_err(dev, "Invalid pin: cen.\n");
249 ret = -EINVAL;
250 goto err;
251 }
252 }
253
254 if (pdata->chg) {
255 if (!gpio_is_valid(pdata->chg)) {
256 dev_err(dev, "Invalid pin: chg.\n");
257 ret = -EINVAL;
258 goto err;
259 }
260 }
261
262 if (pdata->flt) {
263 if (!gpio_is_valid(pdata->flt)) {
264 dev_err(dev, "Invalid pin: flt.\n");
265 ret = -EINVAL;
266 goto err;
267 }
268 }
269
270 if (pdata->usus) {
271 if (!gpio_is_valid(pdata->usus)) {
272 dev_err(dev, "Invalid pin: usus.\n");
273 ret = -EINVAL;
274 goto err;
275 }
276 }
277
278 data->fault = false;
279 data->ta_in = ta_in;
280 data->usb_in = usb_in;
281
282 data->psy.name = "max8903_charger";
283 data->psy.type = (ta_in) ? POWER_SUPPLY_TYPE_MAINS :
284 ((usb_in) ? POWER_SUPPLY_TYPE_USB :
285 POWER_SUPPLY_TYPE_BATTERY);
286 data->psy.get_property = max8903_get_property;
287 data->psy.properties = max8903_charger_props;
288 data->psy.num_properties = ARRAY_SIZE(max8903_charger_props);
289
290 ret = power_supply_register(dev, &data->psy);
291 if (ret) {
292 dev_err(dev, "failed: power supply register.\n");
293 goto err;
294 }
295
296 if (pdata->dc_valid) {
297 ret = request_threaded_irq(gpio_to_irq(pdata->dok),
298 NULL, max8903_dcin,
299 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
300 "MAX8903 DC IN", data);
301 if (ret) {
302 dev_err(dev, "Cannot request irq %d for DC (%d)\n",
303 gpio_to_irq(pdata->dok), ret);
304 goto err_psy;
305 }
306 }
307
308 if (pdata->usb_valid) {
309 ret = request_threaded_irq(gpio_to_irq(pdata->uok),
310 NULL, max8903_usbin,
311 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
312 "MAX8903 USB IN", data);
313 if (ret) {
314 dev_err(dev, "Cannot request irq %d for USB (%d)\n",
315 gpio_to_irq(pdata->uok), ret);
316 goto err_dc_irq;
317 }
318 }
319
320 if (pdata->flt) {
321 ret = request_threaded_irq(gpio_to_irq(pdata->flt),
322 NULL, max8903_fault,
323 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
324 "MAX8903 Fault", data);
325 if (ret) {
326 dev_err(dev, "Cannot request irq %d for Fault (%d)\n",
327 gpio_to_irq(pdata->flt), ret);
328 goto err_usb_irq;
329 }
330 }
331
332 return 0;
333
334err_usb_irq:
335 if (pdata->usb_valid)
336 free_irq(gpio_to_irq(pdata->uok), data);
337err_dc_irq:
338 if (pdata->dc_valid)
339 free_irq(gpio_to_irq(pdata->dok), data);
340err_psy:
341 power_supply_unregister(&data->psy);
342err:
343 kfree(data);
344 return ret;
345}
346
347static __devexit int max8903_remove(struct platform_device *pdev)
348{
349 struct max8903_data *data = platform_get_drvdata(pdev);
350
351 if (data) {
352 struct max8903_pdata *pdata = data->pdata;
353
354 if (pdata->flt)
355 free_irq(gpio_to_irq(pdata->flt), data);
356 if (pdata->usb_valid)
357 free_irq(gpio_to_irq(pdata->uok), data);
358 if (pdata->dc_valid)
359 free_irq(gpio_to_irq(pdata->dok), data);
360 power_supply_unregister(&data->psy);
361 kfree(data);
362 }
363
364 return 0;
365}
366
367static struct platform_driver max8903_driver = {
368 .probe = max8903_probe,
369 .remove = __devexit_p(max8903_remove),
370 .driver = {
371 .name = "max8903-charger",
372 .owner = THIS_MODULE,
373 },
374};
375
376static int __init max8903_init(void)
377{
378 return platform_driver_register(&max8903_driver);
379}
380module_init(max8903_init);
381
382static void __exit max8903_exit(void)
383{
384 platform_driver_unregister(&max8903_driver);
385}
386module_exit(max8903_exit);
387
388MODULE_LICENSE("GPL");
389MODULE_DESCRIPTION("MAX8903 Charger Driver");
390MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
391MODULE_ALIAS("max8903-charger");
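max8903_probe() takes all of its GPIO numbers and the dc_valid/usb_valid flags from platform data, so the driver only does something once a board registers a max8903_pdata. A hedged board-file sketch built from the fields referenced above (GPIO numbers are placeholders; the exact struct definition lives in the new <linux/power/max8903_charger.h>):

/* Board-code sketch; GPIO numbers are placeholders and the field
 * types follow the usage in max8903_probe() above. */
static struct max8903_pdata board_max8903_pdata = {
	.dok = 40,        /* DC (adapter) present, active low  */
	.uok = 41,        /* USB power present, active low     */
	.cen = 42,        /* charger enable, active low        */
	.chg = 43,        /* charging status input             */
	.flt = 44,        /* fault status input                */
	.dcm = 45,        /* current-limit mode: 1=DC, 0=USB   */
	.dc_valid = true,
	.usb_valid = true,
};

static struct platform_device board_max8903_device = {
	.name = "max8903-charger",
	.dev  = {
		.platform_data = &board_max8903_pdata,
	},
};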
diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c
index 0cd9f67d33e5..b527c93bf2f3 100644
--- a/drivers/power/test_power.c
+++ b/drivers/power/test_power.c
@@ -3,6 +3,12 @@
3 * 3 *
4 * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com> 4 * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com>
5 * 5 *
6 * Dynamic module parameter code from the Virtual Battery Driver
7 * Copyright (C) 2008 Pylone, Inc.
8 * By: Masashi YOKOTA <yokota@pylone.jp>
9 * Originally found here:
10 * http://downloads.pylone.jp/src/virtual_battery/virtual_battery-0.0.1.tar.bz2
11 *
6 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 14 * published by the Free Software Foundation.
@@ -15,8 +21,12 @@
15#include <linux/delay.h> 21#include <linux/delay.h>
16#include <linux/vermagic.h> 22#include <linux/vermagic.h>
17 23
18static int test_power_ac_online = 1; 24static int ac_online = 1;
19static int test_power_battery_status = POWER_SUPPLY_STATUS_CHARGING; 25static int battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
26static int battery_health = POWER_SUPPLY_HEALTH_GOOD;
27static int battery_present = 1; /* true */
28static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION;
29static int battery_capacity = 50;
20 30
21static int test_power_get_ac_property(struct power_supply *psy, 31static int test_power_get_ac_property(struct power_supply *psy,
22 enum power_supply_property psp, 32 enum power_supply_property psp,
@@ -24,7 +34,7 @@ static int test_power_get_ac_property(struct power_supply *psy,
24{ 34{
25 switch (psp) { 35 switch (psp) {
26 case POWER_SUPPLY_PROP_ONLINE: 36 case POWER_SUPPLY_PROP_ONLINE:
27 val->intval = test_power_ac_online; 37 val->intval = ac_online;
28 break; 38 break;
29 default: 39 default:
30 return -EINVAL; 40 return -EINVAL;
@@ -47,22 +57,30 @@ static int test_power_get_battery_property(struct power_supply *psy,
47 val->strval = UTS_RELEASE; 57 val->strval = UTS_RELEASE;
48 break; 58 break;
49 case POWER_SUPPLY_PROP_STATUS: 59 case POWER_SUPPLY_PROP_STATUS:
50 val->intval = test_power_battery_status; 60 val->intval = battery_status;
51 break; 61 break;
52 case POWER_SUPPLY_PROP_CHARGE_TYPE: 62 case POWER_SUPPLY_PROP_CHARGE_TYPE:
53 val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; 63 val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
54 break; 64 break;
55 case POWER_SUPPLY_PROP_HEALTH: 65 case POWER_SUPPLY_PROP_HEALTH:
56 val->intval = POWER_SUPPLY_HEALTH_GOOD; 66 val->intval = battery_health;
67 break;
68 case POWER_SUPPLY_PROP_PRESENT:
69 val->intval = battery_present;
57 break; 70 break;
58 case POWER_SUPPLY_PROP_TECHNOLOGY: 71 case POWER_SUPPLY_PROP_TECHNOLOGY:
59 val->intval = POWER_SUPPLY_TECHNOLOGY_LION; 72 val->intval = battery_technology;
60 break; 73 break;
61 case POWER_SUPPLY_PROP_CAPACITY_LEVEL: 74 case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
62 val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; 75 val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
63 break; 76 break;
64 case POWER_SUPPLY_PROP_CAPACITY: 77 case POWER_SUPPLY_PROP_CAPACITY:
65 val->intval = 50; 78 case POWER_SUPPLY_PROP_CHARGE_NOW:
79 val->intval = battery_capacity;
80 break;
81 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
82 case POWER_SUPPLY_PROP_CHARGE_FULL:
83 val->intval = 100;
66 break; 84 break;
67 case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: 85 case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
68 case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: 86 case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
@@ -84,9 +102,11 @@ static enum power_supply_property test_power_battery_props[] = {
84 POWER_SUPPLY_PROP_STATUS, 102 POWER_SUPPLY_PROP_STATUS,
85 POWER_SUPPLY_PROP_CHARGE_TYPE, 103 POWER_SUPPLY_PROP_CHARGE_TYPE,
86 POWER_SUPPLY_PROP_HEALTH, 104 POWER_SUPPLY_PROP_HEALTH,
105 POWER_SUPPLY_PROP_PRESENT,
87 POWER_SUPPLY_PROP_TECHNOLOGY, 106 POWER_SUPPLY_PROP_TECHNOLOGY,
107 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
88 POWER_SUPPLY_PROP_CHARGE_FULL, 108 POWER_SUPPLY_PROP_CHARGE_FULL,
89 POWER_SUPPLY_PROP_CHARGE_EMPTY, 109 POWER_SUPPLY_PROP_CHARGE_NOW,
90 POWER_SUPPLY_PROP_CAPACITY, 110 POWER_SUPPLY_PROP_CAPACITY,
91 POWER_SUPPLY_PROP_CAPACITY_LEVEL, 111 POWER_SUPPLY_PROP_CAPACITY_LEVEL,
92 POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 112 POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -118,6 +138,7 @@ static struct power_supply test_power_supplies[] = {
118 }, 138 },
119}; 139};
120 140
141
121static int __init test_power_init(void) 142static int __init test_power_init(void)
122{ 143{
123 int i; 144 int i;
@@ -145,8 +166,8 @@ static void __exit test_power_exit(void)
145 int i; 166 int i;
146 167
147 /* Let's see how we handle changes... */ 168 /* Let's see how we handle changes... */
148 test_power_ac_online = 0; 169 ac_online = 0;
149 test_power_battery_status = POWER_SUPPLY_STATUS_DISCHARGING; 170 battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
150 for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) 171 for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
151 power_supply_changed(&test_power_supplies[i]); 172 power_supply_changed(&test_power_supplies[i]);
152 pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n", 173 pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n",
@@ -158,6 +179,241 @@ static void __exit test_power_exit(void)
158} 179}
159module_exit(test_power_exit); 180module_exit(test_power_exit);
160 181
182
183
184#define MAX_KEYLENGTH 256
185struct battery_property_map {
186 int value;
187 char const *key;
188};
189
190static struct battery_property_map map_ac_online[] = {
 191 { 0, "off" },
 192 { 1, "on" },
193 { -1, NULL },
194};
195
196static struct battery_property_map map_status[] = {
197 { POWER_SUPPLY_STATUS_CHARGING, "charging" },
198 { POWER_SUPPLY_STATUS_DISCHARGING, "discharging" },
199 { POWER_SUPPLY_STATUS_NOT_CHARGING, "not-charging" },
200 { POWER_SUPPLY_STATUS_FULL, "full" },
201 { -1, NULL },
202};
203
204static struct battery_property_map map_health[] = {
205 { POWER_SUPPLY_HEALTH_GOOD, "good" },
206 { POWER_SUPPLY_HEALTH_OVERHEAT, "overheat" },
207 { POWER_SUPPLY_HEALTH_DEAD, "dead" },
208 { POWER_SUPPLY_HEALTH_OVERVOLTAGE, "overvoltage" },
209 { POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, "failure" },
210 { -1, NULL },
211};
212
213static struct battery_property_map map_present[] = {
214 { 0, "false" },
215 { 1, "true" },
216 { -1, NULL },
217};
218
219static struct battery_property_map map_technology[] = {
220 { POWER_SUPPLY_TECHNOLOGY_NiMH, "NiMH" },
221 { POWER_SUPPLY_TECHNOLOGY_LION, "LION" },
222 { POWER_SUPPLY_TECHNOLOGY_LIPO, "LIPO" },
223 { POWER_SUPPLY_TECHNOLOGY_LiFe, "LiFe" },
224 { POWER_SUPPLY_TECHNOLOGY_NiCd, "NiCd" },
225 { POWER_SUPPLY_TECHNOLOGY_LiMn, "LiMn" },
226 { -1, NULL },
227};
228
229
230static int map_get_value(struct battery_property_map *map, const char *key,
231 int def_val)
232{
233 char buf[MAX_KEYLENGTH];
234 int cr;
235
236 strncpy(buf, key, MAX_KEYLENGTH);
237 buf[MAX_KEYLENGTH-1] = '\0';
238
239 cr = strnlen(buf, MAX_KEYLENGTH) - 1;
240 if (buf[cr] == '\n')
241 buf[cr] = '\0';
242
243 while (map->key) {
244 if (strncasecmp(map->key, buf, MAX_KEYLENGTH) == 0)
245 return map->value;
246 map++;
247 }
248
249 return def_val;
250}
251
252
253static const char *map_get_key(struct battery_property_map *map, int value,
254 const char *def_key)
255{
256 while (map->key) {
257 if (map->value == value)
258 return map->key;
259 map++;
260 }
261
262 return def_key;
263}
264
265static int param_set_ac_online(const char *key, const struct kernel_param *kp)
266{
267 ac_online = map_get_value(map_ac_online, key, ac_online);
268 power_supply_changed(&test_power_supplies[0]);
269 return 0;
270}
271
272static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
273{
274 strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown"));
275 return strlen(buffer);
276}
277
278static int param_set_battery_status(const char *key,
279 const struct kernel_param *kp)
280{
281 battery_status = map_get_value(map_status, key, battery_status);
282 power_supply_changed(&test_power_supplies[1]);
283 return 0;
284}
285
286static int param_get_battery_status(char *buffer, const struct kernel_param *kp)
287{
288 strcpy(buffer, map_get_key(map_status, battery_status, "unknown"));
289 return strlen(buffer);
290}
291
292static int param_set_battery_health(const char *key,
293 const struct kernel_param *kp)
294{
295 battery_health = map_get_value(map_health, key, battery_health);
296 power_supply_changed(&test_power_supplies[1]);
297 return 0;
298}
299
300static int param_get_battery_health(char *buffer, const struct kernel_param *kp)
301{
302 strcpy(buffer, map_get_key(map_health, battery_health, "unknown"));
303 return strlen(buffer);
304}
305
306static int param_set_battery_present(const char *key,
307 const struct kernel_param *kp)
308{
309 battery_present = map_get_value(map_present, key, battery_present);
310 power_supply_changed(&test_power_supplies[0]);
311 return 0;
312}
313
314static int param_get_battery_present(char *buffer,
315 const struct kernel_param *kp)
316{
317 strcpy(buffer, map_get_key(map_present, battery_present, "unknown"));
318 return strlen(buffer);
319}
320
321static int param_set_battery_technology(const char *key,
322 const struct kernel_param *kp)
323{
324 battery_technology = map_get_value(map_technology, key,
325 battery_technology);
326 power_supply_changed(&test_power_supplies[1]);
327 return 0;
328}
329
330static int param_get_battery_technology(char *buffer,
331 const struct kernel_param *kp)
332{
333 strcpy(buffer,
334 map_get_key(map_technology, battery_technology, "unknown"));
335 return strlen(buffer);
336}
337
338static int param_set_battery_capacity(const char *key,
339 const struct kernel_param *kp)
340{
341 int tmp;
342
343 if (1 != sscanf(key, "%d", &tmp))
344 return -EINVAL;
345
346 battery_capacity = tmp;
347 power_supply_changed(&test_power_supplies[1]);
348 return 0;
349}
350
351#define param_get_battery_capacity param_get_int
352
353
354
355static struct kernel_param_ops param_ops_ac_online = {
356 .set = param_set_ac_online,
357 .get = param_get_ac_online,
358};
359
360static struct kernel_param_ops param_ops_battery_status = {
361 .set = param_set_battery_status,
362 .get = param_get_battery_status,
363};
364
365static struct kernel_param_ops param_ops_battery_present = {
366 .set = param_set_battery_present,
367 .get = param_get_battery_present,
368};
369
370static struct kernel_param_ops param_ops_battery_technology = {
371 .set = param_set_battery_technology,
372 .get = param_get_battery_technology,
373};
374
375static struct kernel_param_ops param_ops_battery_health = {
376 .set = param_set_battery_health,
377 .get = param_get_battery_health,
378};
379
380static struct kernel_param_ops param_ops_battery_capacity = {
381 .set = param_set_battery_capacity,
382 .get = param_get_battery_capacity,
383};
384
385
386#define param_check_ac_online(name, p) __param_check(name, p, void);
387#define param_check_battery_status(name, p) __param_check(name, p, void);
388#define param_check_battery_present(name, p) __param_check(name, p, void);
389#define param_check_battery_technology(name, p) __param_check(name, p, void);
390#define param_check_battery_health(name, p) __param_check(name, p, void);
391#define param_check_battery_capacity(name, p) __param_check(name, p, void);
392
393
394module_param(ac_online, ac_online, 0644);
395MODULE_PARM_DESC(ac_online, "AC charging state <on|off>");
396
397module_param(battery_status, battery_status, 0644);
398MODULE_PARM_DESC(battery_status,
399 "battery status <charging|discharging|not-charging|full>");
400
401module_param(battery_present, battery_present, 0644);
402MODULE_PARM_DESC(battery_present,
403 "battery presence state <good|overheat|dead|overvoltage|failure>");
404
405module_param(battery_technology, battery_technology, 0644);
406MODULE_PARM_DESC(battery_technology,
407 "battery technology <NiMH|LION|LIPO|LiFe|NiCd|LiMn>");
408
409module_param(battery_health, battery_health, 0644);
410MODULE_PARM_DESC(battery_health,
411 "battery health state <good|overheat|dead|overvoltage|failure>");
412
413module_param(battery_capacity, battery_capacity, 0644);
414MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
415
416
161MODULE_DESCRIPTION("Power supply driver for testing"); 417MODULE_DESCRIPTION("Power supply driver for testing");
162MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>"); 418MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
163MODULE_LICENSE("GPL"); 419MODULE_LICENSE("GPL");
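
The parameter plumbing above uses the kernel's pluggable parameter types: module_param(name, type, perm) expands to references to param_ops_<type> and param_check_<type>, so providing those two symbols is enough to expose a string-mapped value under /sys/module/test_power/parameters/ and to notify the power supply core whenever userspace writes it. A minimal sketch of the same pattern for a hypothetical "verbosity" parameter (the name and the 0..3 range are illustrative, not part of the patch):

/* Sketch of a module parameter with custom set/get callbacks, mirroring the
 * test_power pattern above; "verbosity" and its range are made up. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int verbosity;

static int param_set_verbosity(const char *val, const struct kernel_param *kp)
{
	int tmp;

	if (sscanf(val, "%d", &tmp) != 1)
		return -EINVAL;
	if (tmp < 0 || tmp > 3)
		return -ERANGE;
	verbosity = tmp;
	return 0;
}

static int param_get_verbosity(char *buffer, const struct kernel_param *kp)
{
	/* the core hands over a PAGE_SIZE buffer; return the bytes written */
	return sprintf(buffer, "%d", verbosity);
}

static struct kernel_param_ops param_ops_verbosity = {
	.set = param_set_verbosity,
	.get = param_get_verbosity,
};

#define param_check_verbosity(name, p) __param_check(name, p, int)

module_param(verbosity, verbosity, 0644);
MODULE_PARM_DESC(verbosity, "demo verbosity level <0..3>");
MODULE_LICENSE("GPL");

module_param_cb() achieves the same binding without the param_check_<type> macro; the module_param() spelling above is the one the patch uses, and it is why each setter in test_power can also call power_supply_changed() to push a uevent as soon as a value is written.
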
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index e5ced3a4c1ed..d119c38b3ff6 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -271,24 +271,33 @@ static int __devexit z2_batt_remove(struct i2c_client *client)
271} 271}
272 272
273#ifdef CONFIG_PM 273#ifdef CONFIG_PM
274static int z2_batt_suspend(struct i2c_client *client, pm_message_t state) 274static int z2_batt_suspend(struct device *dev)
275{ 275{
276 struct i2c_client *client = to_i2c_client(dev);
276 struct z2_charger *charger = i2c_get_clientdata(client); 277 struct z2_charger *charger = i2c_get_clientdata(client);
277 278
278 flush_work_sync(&charger->bat_work); 279 flush_work_sync(&charger->bat_work);
279 return 0; 280 return 0;
280} 281}
281 282
282static int z2_batt_resume(struct i2c_client *client) 283static int z2_batt_resume(struct device *dev)
283{ 284{
285 struct i2c_client *client = to_i2c_client(dev);
284 struct z2_charger *charger = i2c_get_clientdata(client); 286 struct z2_charger *charger = i2c_get_clientdata(client);
285 287
286 schedule_work(&charger->bat_work); 288 schedule_work(&charger->bat_work);
287 return 0; 289 return 0;
288} 290}
291
292static const struct dev_pm_ops z2_battery_pm_ops = {
293 .suspend = z2_batt_suspend,
294 .resume = z2_batt_resume,
295};
296
297#define Z2_BATTERY_PM_OPS (&z2_battery_pm_ops)
298
289#else 299#else
290#define z2_batt_suspend NULL 300#define Z2_BATTERY_PM_OPS (NULL)
291#define z2_batt_resume NULL
292#endif 301#endif
293 302
294static const struct i2c_device_id z2_batt_id[] = { 303static const struct i2c_device_id z2_batt_id[] = {
@@ -301,11 +310,10 @@ static struct i2c_driver z2_batt_driver = {
301 .driver = { 310 .driver = {
302 .name = "z2-battery", 311 .name = "z2-battery",
303 .owner = THIS_MODULE, 312 .owner = THIS_MODULE,
313 .pm = Z2_BATTERY_PM_OPS
304 }, 314 },
305 .probe = z2_batt_probe, 315 .probe = z2_batt_probe,
306 .remove = z2_batt_remove, 316 .remove = z2_batt_remove,
307 .suspend = z2_batt_suspend,
308 .resume = z2_batt_resume,
309 .id_table = z2_batt_id, 317 .id_table = z2_batt_id,
310}; 318};
311 319
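
The z2_battery hunk above is the stock conversion from the legacy i2c_driver .suspend/.resume hooks to a dev_pm_ops attached to driver.pm, recovering the i2c_client from the struct device with to_i2c_client(). A bare-bones sketch of the same shape (driver and function names are placeholders, not the z2 code):

/* Minimal sketch of the dev_pm_ops pattern used above; "foo" is hypothetical. */
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "suspending\n");	/* quiesce the device here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "resuming\n");	/* re-arm the device here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= FOO_PM_OPS,	/* replaces i2c_driver.suspend/.resume */
	},
	/* .probe, .remove and .id_table omitted in this sketch */
};

Where available, SIMPLE_DEV_PM_OPS() can generate the ops structure; the open-coded form above matches what the patch does.
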
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index f0b13a0d1851..d7ed20f293d7 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -297,5 +297,11 @@ config REGULATOR_TPS6524X
297 serial interface currently supported on the sequencer serial 297 serial interface currently supported on the sequencer serial
298 port controller. 298 port controller.
299 299
300config REGULATOR_TPS65910
301 tristate "TI TPS65910 Power Regulator"
302 depends on MFD_TPS65910
303 help
304 This driver supports TPS65910 voltage regulator chips.
305
300endif 306endif
301 307
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 165ff5371e9e..3932d2ec38f3 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -42,5 +42,6 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
42obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o 42obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
43obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o 43obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
44obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o 44obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
45obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
45 46
46ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG 47ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 0fae51c4845a..d3e38790906e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -158,6 +158,13 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
158 struct regulator *regulator; 158 struct regulator *regulator;
159 159
160 list_for_each_entry(regulator, &rdev->consumer_list, list) { 160 list_for_each_entry(regulator, &rdev->consumer_list, list) {
161 /*
162 * Assume consumers that didn't say anything are OK
163 * with anything in the constraint range.
164 */
165 if (!regulator->min_uV && !regulator->max_uV)
166 continue;
167
161 if (*max_uV > regulator->max_uV) 168 if (*max_uV > regulator->max_uV)
162 *max_uV = regulator->max_uV; 169 *max_uV = regulator->max_uV;
163 if (*min_uV < regulator->min_uV) 170 if (*min_uV < regulator->min_uV)
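
The comment added to regulator_check_consumers() documents the behavioural change: a consumer that never called regulator_set_voltage() (both bounds still zero) no longer collapses the allowed window to nothing, while every other consumer still intersects its request with the range being validated. A stand-alone illustration of that intersection:

/* Demo of the consumer voltage-range intersection in
 * regulator_check_consumers(), including the new "no request" skip. */
#include <stdio.h>

struct consumer { int min_uV, max_uV; };

int main(void)
{
	struct consumer consumers[] = {
		{ 0, 0 },		/* never called set_voltage: ignored */
		{ 1800000, 3300000 },
		{ 2500000, 3000000 },
	};
	int min_uV = 1000000, max_uV = 3600000;	/* range being validated */
	unsigned i;

	for (i = 0; i < sizeof(consumers) / sizeof(consumers[0]); i++) {
		if (!consumers[i].min_uV && !consumers[i].max_uV)
			continue;	/* assume this consumer accepts anything */
		if (max_uV > consumers[i].max_uV)
			max_uV = consumers[i].max_uV;
		if (min_uV < consumers[i].min_uV)
			min_uV = consumers[i].min_uV;
	}

	/* prints 2500000..3000000: the intersection of the real requests */
	printf("constrained range: %d..%d uV\n", min_uV, max_uV);
	return 0;
}
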
@@ -197,9 +204,9 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
197} 204}
198 205
199/* operating mode constraint check */ 206/* operating mode constraint check */
200static int regulator_check_mode(struct regulator_dev *rdev, int mode) 207static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
201{ 208{
202 switch (mode) { 209 switch (*mode) {
203 case REGULATOR_MODE_FAST: 210 case REGULATOR_MODE_FAST:
204 case REGULATOR_MODE_NORMAL: 211 case REGULATOR_MODE_NORMAL:
205 case REGULATOR_MODE_IDLE: 212 case REGULATOR_MODE_IDLE:
@@ -217,11 +224,17 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
217 rdev_err(rdev, "operation not allowed\n"); 224 rdev_err(rdev, "operation not allowed\n");
218 return -EPERM; 225 return -EPERM;
219 } 226 }
220 if (!(rdev->constraints->valid_modes_mask & mode)) { 227
221 rdev_err(rdev, "invalid mode %x\n", mode); 228 /* The modes are bitmasks, the most power hungry modes having
222 return -EINVAL; 229 * the lowest values. If the requested mode isn't supported
230 * try higher modes. */
231 while (*mode) {
232 if (rdev->constraints->valid_modes_mask & *mode)
233 return 0;
234 *mode /= 2;
223 } 235 }
224 return 0; 236
237 return -EINVAL;
225} 238}
226 239
227/* dynamic regulator mode switching constraint check */ 240/* dynamic regulator mode switching constraint check */
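
regulator_mode_constrain() now degrades the requested mode instead of rejecting it outright: the REGULATOR_MODE_* values are single bits with the most power-hungry mode at the lowest value, so halving the mode walks toward modes that can source more current until one is permitted by valid_modes_mask. A stand-alone demonstration; the mode values are quoted from include/linux/regulator/consumer.h and should be treated as an assumption of this sketch:

/* Stand-alone demo of the fallback loop in regulator_mode_constrain(). */
#include <stdio.h>

#define REGULATOR_MODE_FAST	0x1	/* assumed values, see lead-in */
#define REGULATOR_MODE_NORMAL	0x2
#define REGULATOR_MODE_IDLE	0x4
#define REGULATOR_MODE_STANDBY	0x8

static int mode_constrain(unsigned valid_modes_mask, int *mode)
{
	/* most power-hungry modes have the lowest values, so halving the
	 * mode steps toward modes that can supply more current */
	while (*mode) {
		if (valid_modes_mask & *mode)
			return 0;
		*mode /= 2;
	}
	return -1;	/* no usable mode; -EINVAL in the kernel */
}

int main(void)
{
	int mode = REGULATOR_MODE_IDLE;
	unsigned mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;

	if (mode_constrain(mask, &mode) == 0)
		printf("requested IDLE, constrained to 0x%x (NORMAL)\n", mode);
	return 0;
}
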
@@ -612,7 +625,7 @@ static void drms_uA_update(struct regulator_dev *rdev)
612 output_uV, current_uA); 625 output_uV, current_uA);
613 626
614 /* check the new mode is allowed */ 627 /* check the new mode is allowed */
615 err = regulator_check_mode(rdev, mode); 628 err = regulator_mode_constrain(rdev, &mode);
616 if (err == 0) 629 if (err == 0)
617 rdev->desc->ops->set_mode(rdev, mode); 630 rdev->desc->ops->set_mode(rdev, mode);
618} 631}
@@ -718,6 +731,10 @@ static void print_constraints(struct regulator_dev *rdev)
718 count += sprintf(buf + count, "at %d mV ", ret / 1000); 731 count += sprintf(buf + count, "at %d mV ", ret / 1000);
719 } 732 }
720 733
734 if (constraints->uV_offset)
 735	count += sprintf(buf + count, "%dmV offset ",
736 constraints->uV_offset / 1000);
737
721 if (constraints->min_uA && constraints->max_uA) { 738 if (constraints->min_uA && constraints->max_uA) {
722 if (constraints->min_uA == constraints->max_uA) 739 if (constraints->min_uA == constraints->max_uA)
723 count += sprintf(buf + count, "%d mA ", 740 count += sprintf(buf + count, "%d mA ",
@@ -1498,13 +1515,14 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
1498 */ 1515 */
1499int regulator_force_disable(struct regulator *regulator) 1516int regulator_force_disable(struct regulator *regulator)
1500{ 1517{
1518 struct regulator_dev *rdev = regulator->rdev;
1501 struct regulator_dev *supply_rdev = NULL; 1519 struct regulator_dev *supply_rdev = NULL;
1502 int ret; 1520 int ret;
1503 1521
1504 mutex_lock(&regulator->rdev->mutex); 1522 mutex_lock(&rdev->mutex);
1505 regulator->uA_load = 0; 1523 regulator->uA_load = 0;
1506 ret = _regulator_force_disable(regulator->rdev, &supply_rdev); 1524 ret = _regulator_force_disable(rdev, &supply_rdev);
1507 mutex_unlock(&regulator->rdev->mutex); 1525 mutex_unlock(&rdev->mutex);
1508 1526
1509 if (supply_rdev) 1527 if (supply_rdev)
1510 regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev))); 1528 regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev)));
@@ -1634,6 +1652,9 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
1634 1652
1635 trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); 1653 trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
1636 1654
1655 min_uV += rdev->constraints->uV_offset;
1656 max_uV += rdev->constraints->uV_offset;
1657
1637 if (rdev->desc->ops->set_voltage) { 1658 if (rdev->desc->ops->set_voltage) {
1638 ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, 1659 ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
1639 &selector); 1660 &selector);
@@ -1858,18 +1879,22 @@ EXPORT_SYMBOL_GPL(regulator_sync_voltage);
1858 1879
1859static int _regulator_get_voltage(struct regulator_dev *rdev) 1880static int _regulator_get_voltage(struct regulator_dev *rdev)
1860{ 1881{
1861 int sel; 1882 int sel, ret;
1862 1883
1863 if (rdev->desc->ops->get_voltage_sel) { 1884 if (rdev->desc->ops->get_voltage_sel) {
1864 sel = rdev->desc->ops->get_voltage_sel(rdev); 1885 sel = rdev->desc->ops->get_voltage_sel(rdev);
1865 if (sel < 0) 1886 if (sel < 0)
1866 return sel; 1887 return sel;
1867 return rdev->desc->ops->list_voltage(rdev, sel); 1888 ret = rdev->desc->ops->list_voltage(rdev, sel);
1868 } 1889 } else if (rdev->desc->ops->get_voltage) {
1869 if (rdev->desc->ops->get_voltage) 1890 ret = rdev->desc->ops->get_voltage(rdev);
1870 return rdev->desc->ops->get_voltage(rdev); 1891 } else {
1871 else
1872 return -EINVAL; 1892 return -EINVAL;
1893 }
1894
1895 if (ret < 0)
1896 return ret;
1897 return ret - rdev->constraints->uV_offset;
1873} 1898}
1874 1899
1875/** 1900/**
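
The new uV_offset constraint models a fixed drop between the regulator output and the consumer: _regulator_do_set_voltage() adds the offset before programming the hardware and _regulator_get_voltage() subtracts it again, so consumers keep reasoning in load-side volts. A tiny worked example with an arbitrary 50 mV offset:

/* Demo of the uV_offset bookkeeping in _regulator_do_set_voltage() and
 * _regulator_get_voltage(); the 50000 uV offset is a made-up figure. */
#include <stdio.h>

int main(void)
{
	int uV_offset = 50000;		/* constraint: 50 mV drop to the load */
	int requested_uV = 1800000;	/* what the consumer asked for */

	int programmed_uV = requested_uV + uV_offset;	/* written to hardware */
	int reported_uV = programmed_uV - uV_offset;	/* what get_voltage() returns */

	printf("consumer asks %d uV -> regulator set to %d uV -> reported %d uV\n",
	       requested_uV, programmed_uV, reported_uV);
	return 0;
}
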
@@ -2005,7 +2030,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
2005 } 2030 }
2006 2031
2007 /* constraints check */ 2032 /* constraints check */
2008 ret = regulator_check_mode(rdev, mode); 2033 ret = regulator_mode_constrain(rdev, &mode);
2009 if (ret < 0) 2034 if (ret < 0)
2010 goto out; 2035 goto out;
2011 2036
@@ -2081,16 +2106,26 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
2081 2106
2082 mutex_lock(&rdev->mutex); 2107 mutex_lock(&rdev->mutex);
2083 2108
2109 /*
2110 * first check to see if we can set modes at all, otherwise just
2111 * tell the consumer everything is OK.
2112 */
2084 regulator->uA_load = uA_load; 2113 regulator->uA_load = uA_load;
2085 ret = regulator_check_drms(rdev); 2114 ret = regulator_check_drms(rdev);
2086 if (ret < 0) 2115 if (ret < 0) {
2116 ret = 0;
2087 goto out; 2117 goto out;
2088 ret = -EINVAL; 2118 }
2089 2119
2090 /* sanity check */
2091 if (!rdev->desc->ops->get_optimum_mode) 2120 if (!rdev->desc->ops->get_optimum_mode)
2092 goto out; 2121 goto out;
2093 2122
2123 /*
2124 * we can actually do this so any errors are indicators of
2125 * potential real failure.
2126 */
2127 ret = -EINVAL;
2128
2094 /* get output voltage */ 2129 /* get output voltage */
2095 output_uV = _regulator_get_voltage(rdev); 2130 output_uV = _regulator_get_voltage(rdev);
2096 if (output_uV <= 0) { 2131 if (output_uV <= 0) {
@@ -2116,7 +2151,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
2116 mode = rdev->desc->ops->get_optimum_mode(rdev, 2151 mode = rdev->desc->ops->get_optimum_mode(rdev,
2117 input_uV, output_uV, 2152 input_uV, output_uV,
2118 total_uA_load); 2153 total_uA_load);
2119 ret = regulator_check_mode(rdev, mode); 2154 ret = regulator_mode_constrain(rdev, &mode);
2120 if (ret < 0) { 2155 if (ret < 0) {
2121 rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", 2156 rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
2122 total_uA_load, input_uV, output_uV); 2157 total_uA_load, input_uV, output_uV);
@@ -2589,14 +2624,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2589 if (ret < 0) 2624 if (ret < 0)
2590 goto scrub; 2625 goto scrub;
2591 2626
2592 /* set supply regulator if it exists */
2593 if (init_data->supply_regulator && init_data->supply_regulator_dev) {
2594 dev_err(dev,
2595 "Supply regulator specified by both name and dev\n");
2596 ret = -EINVAL;
2597 goto scrub;
2598 }
2599
2600 if (init_data->supply_regulator) { 2627 if (init_data->supply_regulator) {
2601 struct regulator_dev *r; 2628 struct regulator_dev *r;
2602 int found = 0; 2629 int found = 0;
@@ -2621,14 +2648,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2621 goto scrub; 2648 goto scrub;
2622 } 2649 }
2623 2650
2624 if (init_data->supply_regulator_dev) {
2625 dev_warn(dev, "Uses supply_regulator_dev instead of regulator_supply\n");
2626 ret = set_supply(rdev,
2627 dev_get_drvdata(init_data->supply_regulator_dev));
2628 if (ret < 0)
2629 goto scrub;
2630 }
2631
2632 /* add consumers devices */ 2651 /* add consumers devices */
2633 for (i = 0; i < init_data->num_consumer_supplies; i++) { 2652 for (i = 0; i < init_data->num_consumer_supplies; i++) {
2634 ret = set_consumer_device_supply(rdev, 2653 ret = set_consumer_device_supply(rdev,
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 77e0cfb30b23..10d5a1d9768e 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -267,7 +267,6 @@ static int max8997_get_enable_register(struct regulator_dev *rdev,
267 default: 267 default:
268 /* Not controllable or not exists */ 268 /* Not controllable or not exists */
269 return -EINVAL; 269 return -EINVAL;
270 break;
271 } 270 }
272 271
273 return 0; 272 return 0;
@@ -1033,11 +1032,11 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
1033 1032
1034 /* For the safety, set max voltage before setting up */ 1033 /* For the safety, set max voltage before setting up */
1035 for (i = 0; i < 8; i++) { 1034 for (i = 0; i < 8; i++) {
1036 max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1), 1035 max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
1037 max_buck1, 0x3f); 1036 max_buck1, 0x3f);
1038 max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1), 1037 max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
1039 max_buck2, 0x3f); 1038 max_buck2, 0x3f);
1040 max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1), 1039 max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
1041 max_buck5, 0x3f); 1040 max_buck5, 0x3f);
1042 } 1041 }
1043 1042
@@ -1114,13 +1113,13 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
1114 1113
1115 /* Initialize all the DVS related BUCK registers */ 1114 /* Initialize all the DVS related BUCK registers */
1116 for (i = 0; i < 8; i++) { 1115 for (i = 0; i < 8; i++) {
1117 max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1), 1116 max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
1118 max8997->buck1_vol[i], 1117 max8997->buck1_vol[i],
1119 0x3f); 1118 0x3f);
1120 max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1), 1119 max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
1121 max8997->buck2_vol[i], 1120 max8997->buck2_vol[i],
1122 0x3f); 1121 0x3f);
1123 max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1), 1122 max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
1124 max8997->buck5_vol[i], 1123 max8997->buck5_vol[i],
1125 0x3f); 1124 0x3f);
1126 } 1125 }
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index f57e9c42fdb4..41a1495eec2b 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -732,13 +732,15 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
732 if (!pdata->buck1_set1) { 732 if (!pdata->buck1_set1) {
733 printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n"); 733 printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n");
734 WARN_ON(!pdata->buck1_set1); 734 WARN_ON(!pdata->buck1_set1);
735 return -EIO; 735 ret = -EIO;
736 goto err_free_mem;
736 } 737 }
737 /* Check if SET2 is not equal to 0 */ 738 /* Check if SET2 is not equal to 0 */
738 if (!pdata->buck1_set2) { 739 if (!pdata->buck1_set2) {
739 printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n"); 740 printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n");
740 WARN_ON(!pdata->buck1_set2); 741 WARN_ON(!pdata->buck1_set2);
741 return -EIO; 742 ret = -EIO;
743 goto err_free_mem;
742 } 744 }
743 745
744 gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1"); 746 gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
@@ -758,7 +760,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
758 max8998->buck1_vol[0] = i; 760 max8998->buck1_vol[0] = i;
759 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i); 761 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
760 if (ret) 762 if (ret)
761 return ret; 763 goto err_free_mem;
762 764
763 /* Set predefined value for BUCK1 register 2 */ 765 /* Set predefined value for BUCK1 register 2 */
764 i = 0; 766 i = 0;
@@ -770,7 +772,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
770 max8998->buck1_vol[1] = i; 772 max8998->buck1_vol[1] = i;
771 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i); 773 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i);
772 if (ret) 774 if (ret)
773 return ret; 775 goto err_free_mem;
774 776
775 /* Set predefined value for BUCK1 register 3 */ 777 /* Set predefined value for BUCK1 register 3 */
776 i = 0; 778 i = 0;
@@ -782,7 +784,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
782 max8998->buck1_vol[2] = i; 784 max8998->buck1_vol[2] = i;
783 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i); 785 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i);
784 if (ret) 786 if (ret)
785 return ret; 787 goto err_free_mem;
786 788
787 /* Set predefined value for BUCK1 register 4 */ 789 /* Set predefined value for BUCK1 register 4 */
788 i = 0; 790 i = 0;
@@ -794,7 +796,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
794 max8998->buck1_vol[3] = i; 796 max8998->buck1_vol[3] = i;
795 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i); 797 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i);
796 if (ret) 798 if (ret)
797 return ret; 799 goto err_free_mem;
798 800
799 } 801 }
800 802
@@ -803,7 +805,8 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
803 if (!pdata->buck2_set3) { 805 if (!pdata->buck2_set3) {
804 printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n"); 806 printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n");
805 WARN_ON(!pdata->buck2_set3); 807 WARN_ON(!pdata->buck2_set3);
806 return -EIO; 808 ret = -EIO;
809 goto err_free_mem;
807 } 810 }
808 gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3"); 811 gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
809 gpio_direction_output(pdata->buck2_set3, 812 gpio_direction_output(pdata->buck2_set3,
@@ -818,7 +821,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
818 max8998->buck2_vol[0] = i; 821 max8998->buck2_vol[0] = i;
819 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i); 822 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
820 if (ret) 823 if (ret)
821 return ret; 824 goto err_free_mem;
822 825
823 /* BUCK2 register 2 */ 826 /* BUCK2 register 2 */
824 i = 0; 827 i = 0;
@@ -830,7 +833,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
830 max8998->buck2_vol[1] = i; 833 max8998->buck2_vol[1] = i;
831 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i); 834 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
832 if (ret) 835 if (ret)
833 return ret; 836 goto err_free_mem;
834 } 837 }
835 838
836 for (i = 0; i < pdata->num_regulators; i++) { 839 for (i = 0; i < pdata->num_regulators; i++) {
@@ -860,6 +863,7 @@ err:
860 if (rdev[i]) 863 if (rdev[i])
861 regulator_unregister(rdev[i]); 864 regulator_unregister(rdev[i]);
862 865
866err_free_mem:
863 kfree(max8998->rdev); 867 kfree(max8998->rdev);
864 kfree(max8998); 868 kfree(max8998);
865 869
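
The max8998 changes above all serve one purpose: once max8998->rdev and the driver structure have been allocated, an early return leaks them, so every failure now jumps to the new err_free_mem label instead. The general shape of that unwind, with placeholder names:

/* Hedged sketch of the goto-based unwind the max8998 hunk switches to;
 * all foo_* names are placeholders, not symbols from the patch. */
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_chip { int dummy; };

static int foo_setup_hw(struct foo_chip *chip) { return 0; /* stub */ }

static int foo_probe(struct platform_device *pdev)
{
	struct foo_chip *chip;
	int ret;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	ret = foo_setup_hw(chip);
	if (ret)
		goto err_free_mem;	/* a bare "return ret" here would leak chip */

	platform_set_drvdata(pdev, chip);
	return 0;

err_free_mem:
	kfree(chip);
	return ret;
}
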
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 1b8f7398a4a8..3285d41842f2 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -431,7 +431,8 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
431 int min_uV, int max_uV, unsigned *selector) 431 int min_uV, int max_uV, unsigned *selector)
432{ 432{
433 struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); 433 struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
434 int hi, value, val, mask, id = rdev_get_id(rdev); 434 int hi, value, mask, id = rdev_get_id(rdev);
435 u32 valread;
435 int ret; 436 int ret;
436 437
437 dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", 438 dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
@@ -447,15 +448,16 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
447 448
448 mc13xxx_lock(priv->mc13xxx); 449 mc13xxx_lock(priv->mc13xxx);
449 ret = mc13xxx_reg_read(priv->mc13xxx, 450 ret = mc13xxx_reg_read(priv->mc13xxx,
450 mc13892_regulators[id].vsel_reg, &val); 451 mc13892_regulators[id].vsel_reg, &valread);
451 if (ret) 452 if (ret)
452 goto err; 453 goto err;
453 454
454 hi = val & MC13892_SWITCHERS0_SWxHI; 455 if (value > 1375000)
455 if (value > 1375)
456 hi = 1; 456 hi = 1;
457 if (value < 1100) 457 else if (value < 1100000)
458 hi = 0; 458 hi = 0;
459 else
460 hi = valread & MC13892_SWITCHERS0_SWxHI;
459 461
460 if (hi) { 462 if (hi) {
461 value = (value - 1100000) / 25000; 463 value = (value - 1100000) / 25000;
@@ -464,8 +466,10 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
464 value = (value - 600000) / 25000; 466 value = (value - 600000) / 25000;
465 467
466 mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI; 468 mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI;
467 ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, 469 valread = (valread & ~mask) |
468 mask, value << mc13892_regulators[id].vsel_shift); 470 (value << mc13892_regulators[id].vsel_shift);
471 ret = mc13xxx_reg_write(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
472 valread);
469err: 473err:
470 mc13xxx_unlock(priv->mc13xxx); 474 mc13xxx_unlock(priv->mc13xxx);
471 475
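
The reworked mc13892_sw_regulator_set_voltage() treats the target in microvolts: above 1.375 V the SWxHI range bit must be set, below 1.1 V it must be cleared, and in between the currently programmed bit is preserved; the selector is then (uV - 1100000)/25000 in the high range or (uV - 600000)/25000 in the low one, and the whole field is written back in a single register write. A stand-alone check of that arithmetic:

/* Demo of the mc13892 HI-bit / selector arithmetic from the hunk above. */
#include <stdio.h>

static int mc13892_selector(int uV, int current_hi, int *hi_out)
{
	int hi;

	if (uV > 1375000)
		hi = 1;
	else if (uV < 1100000)
		hi = 0;
	else
		hi = current_hi;	/* keep whatever range is programmed */

	*hi_out = hi;
	return hi ? (uV - 1100000) / 25000 : (uV - 600000) / 25000;
}

int main(void)
{
	int hi;
	int sel = mc13892_selector(1200000, 0, &hi);

	/* 1.2 V sits between the limits, so the low range is kept:
	 * (1200000 - 600000) / 25000 = 24 */
	printf("1.2 V -> hi=%d selector=%d\n", hi, sel);
	return 0;
}
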
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 2bb5de1f2421..bc27ab136378 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
174 174
175 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); 175 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
176 176
177 BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages); 177 BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
178 178
179 return mc13xxx_regulators[id].voltages[val]; 179 return mc13xxx_regulators[id].voltages[val];
180} 180}
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index a4d7f4540c18..1011873896dc 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -158,6 +158,7 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
158 "failed to register regulator\n"); 158 "failed to register regulator\n");
159 return ret; 159 return ret;
160 } 160 }
161 platform_set_drvdata(pdev, tps6105x);
161 162
162 return 0; 163 return 0;
163} 164}
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 60a7ca5409e9..fbddc15e1811 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -466,7 +466,6 @@ static struct regulator_ops tps65023_ldo_ops = {
466static int __devinit tps_65023_probe(struct i2c_client *client, 466static int __devinit tps_65023_probe(struct i2c_client *client,
467 const struct i2c_device_id *id) 467 const struct i2c_device_id *id)
468{ 468{
469 static int desc_id;
470 const struct tps_info *info = (void *)id->driver_data; 469 const struct tps_info *info = (void *)id->driver_data;
471 struct regulator_init_data *init_data; 470 struct regulator_init_data *init_data;
472 struct regulator_dev *rdev; 471 struct regulator_dev *rdev;
@@ -499,7 +498,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
499 tps->info[i] = info; 498 tps->info[i] = info;
500 499
501 tps->desc[i].name = info->name; 500 tps->desc[i].name = info->name;
502 tps->desc[i].id = desc_id++; 501 tps->desc[i].id = i;
503 tps->desc[i].n_voltages = num_voltages[i]; 502 tps->desc[i].n_voltages = num_voltages[i];
504 tps->desc[i].ops = (i > TPS65023_DCDC_3 ? 503 tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
505 &tps65023_ldo_ops : &tps65023_dcdc_ops); 504 &tps65023_ldo_ops : &tps65023_dcdc_ops);
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 064755290599..bfffabc21eda 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -553,7 +553,6 @@ static __devinit
553int tps6507x_pmic_probe(struct platform_device *pdev) 553int tps6507x_pmic_probe(struct platform_device *pdev)
554{ 554{
555 struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); 555 struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
556 static int desc_id;
557 struct tps_info *info = &tps6507x_pmic_regs[0]; 556 struct tps_info *info = &tps6507x_pmic_regs[0];
558 struct regulator_init_data *init_data; 557 struct regulator_init_data *init_data;
559 struct regulator_dev *rdev; 558 struct regulator_dev *rdev;
@@ -598,7 +597,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
598 } 597 }
599 598
600 tps->desc[i].name = info->name; 599 tps->desc[i].name = info->name;
601 tps->desc[i].id = desc_id++; 600 tps->desc[i].id = i;
602 tps->desc[i].n_voltages = num_voltages[i]; 601 tps->desc[i].n_voltages = num_voltages[i];
603 tps->desc[i].ops = (i > TPS6507X_DCDC_3 ? 602 tps->desc[i].ops = (i > TPS6507X_DCDC_3 ?
604 &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops); 603 &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
new file mode 100644
index 000000000000..55dd4e6650db
--- /dev/null
+++ b/drivers/regulator/tps65910-regulator.c
@@ -0,0 +1,993 @@
1/*
2 * tps65910.c -- TI tps65910
3 *
4 * Copyright 2010 Texas Instruments Inc.
5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/err.h>
20#include <linux/platform_device.h>
21#include <linux/regulator/driver.h>
22#include <linux/regulator/machine.h>
23#include <linux/delay.h>
24#include <linux/slab.h>
25#include <linux/gpio.h>
26#include <linux/mfd/tps65910.h>
27
28#define TPS65910_REG_VRTC 0
29#define TPS65910_REG_VIO 1
30#define TPS65910_REG_VDD1 2
31#define TPS65910_REG_VDD2 3
32#define TPS65910_REG_VDD3 4
33#define TPS65910_REG_VDIG1 5
34#define TPS65910_REG_VDIG2 6
35#define TPS65910_REG_VPLL 7
36#define TPS65910_REG_VDAC 8
37#define TPS65910_REG_VAUX1 9
38#define TPS65910_REG_VAUX2 10
39#define TPS65910_REG_VAUX33 11
40#define TPS65910_REG_VMMC 12
41
42#define TPS65911_REG_VDDCTRL 4
43#define TPS65911_REG_LDO1 5
44#define TPS65911_REG_LDO2 6
45#define TPS65911_REG_LDO3 7
46#define TPS65911_REG_LDO4 8
47#define TPS65911_REG_LDO5 9
48#define TPS65911_REG_LDO6 10
49#define TPS65911_REG_LDO7 11
50#define TPS65911_REG_LDO8 12
51
52#define TPS65910_NUM_REGULATOR 13
53#define TPS65910_SUPPLY_STATE_ENABLED 0x1
54
 55/* supported VIO voltages in millivolts */
56static const u16 VIO_VSEL_table[] = {
57 1500, 1800, 2500, 3300,
58};
59
60/* VSEL tables for TPS65910 specific LDOs and dcdc's */
61
 62/* supported VDD3 voltages in millivolts */
63static const u16 VDD3_VSEL_table[] = {
64 5000,
65};
66
 67/* supported VDIG1 voltages in millivolts */
68static const u16 VDIG1_VSEL_table[] = {
69 1200, 1500, 1800, 2700,
70};
71
 72/* supported VDIG2 voltages in millivolts */
73static const u16 VDIG2_VSEL_table[] = {
74 1000, 1100, 1200, 1800,
75};
76
 77/* supported VPLL voltages in millivolts */
78static const u16 VPLL_VSEL_table[] = {
79 1000, 1100, 1800, 2500,
80};
81
 82/* supported VDAC voltages in millivolts */
83static const u16 VDAC_VSEL_table[] = {
84 1800, 2600, 2800, 2850,
85};
86
 87/* supported VAUX1 voltages in millivolts */
88static const u16 VAUX1_VSEL_table[] = {
89 1800, 2500, 2800, 2850,
90};
91
 92/* supported VAUX2 voltages in millivolts */
93static const u16 VAUX2_VSEL_table[] = {
94 1800, 2800, 2900, 3300,
95};
96
 97/* supported VAUX33 voltages in millivolts */
98static const u16 VAUX33_VSEL_table[] = {
99 1800, 2000, 2800, 3300,
100};
101
 102/* supported VMMC voltages in millivolts */
103static const u16 VMMC_VSEL_table[] = {
104 1800, 2800, 3000, 3300,
105};
106
107struct tps_info {
108 const char *name;
109 unsigned min_uV;
110 unsigned max_uV;
111 u8 table_len;
112 const u16 *table;
113};
114
115static struct tps_info tps65910_regs[] = {
116 {
117 .name = "VRTC",
118 },
119 {
120 .name = "VIO",
121 .min_uV = 1500000,
122 .max_uV = 3300000,
123 .table_len = ARRAY_SIZE(VIO_VSEL_table),
124 .table = VIO_VSEL_table,
125 },
126 {
127 .name = "VDD1",
128 .min_uV = 600000,
129 .max_uV = 4500000,
130 },
131 {
132 .name = "VDD2",
133 .min_uV = 600000,
134 .max_uV = 4500000,
135 },
136 {
137 .name = "VDD3",
138 .min_uV = 5000000,
139 .max_uV = 5000000,
140 .table_len = ARRAY_SIZE(VDD3_VSEL_table),
141 .table = VDD3_VSEL_table,
142 },
143 {
144 .name = "VDIG1",
145 .min_uV = 1200000,
146 .max_uV = 2700000,
147 .table_len = ARRAY_SIZE(VDIG1_VSEL_table),
148 .table = VDIG1_VSEL_table,
149 },
150 {
151 .name = "VDIG2",
152 .min_uV = 1000000,
153 .max_uV = 1800000,
154 .table_len = ARRAY_SIZE(VDIG2_VSEL_table),
155 .table = VDIG2_VSEL_table,
156 },
157 {
158 .name = "VPLL",
159 .min_uV = 1000000,
160 .max_uV = 2500000,
161 .table_len = ARRAY_SIZE(VPLL_VSEL_table),
162 .table = VPLL_VSEL_table,
163 },
164 {
165 .name = "VDAC",
166 .min_uV = 1800000,
167 .max_uV = 2850000,
168 .table_len = ARRAY_SIZE(VDAC_VSEL_table),
169 .table = VDAC_VSEL_table,
170 },
171 {
172 .name = "VAUX1",
173 .min_uV = 1800000,
174 .max_uV = 2850000,
175 .table_len = ARRAY_SIZE(VAUX1_VSEL_table),
176 .table = VAUX1_VSEL_table,
177 },
178 {
179 .name = "VAUX2",
180 .min_uV = 1800000,
181 .max_uV = 3300000,
182 .table_len = ARRAY_SIZE(VAUX2_VSEL_table),
183 .table = VAUX2_VSEL_table,
184 },
185 {
186 .name = "VAUX33",
187 .min_uV = 1800000,
188 .max_uV = 3300000,
189 .table_len = ARRAY_SIZE(VAUX33_VSEL_table),
190 .table = VAUX33_VSEL_table,
191 },
192 {
193 .name = "VMMC",
194 .min_uV = 1800000,
195 .max_uV = 3300000,
196 .table_len = ARRAY_SIZE(VMMC_VSEL_table),
197 .table = VMMC_VSEL_table,
198 },
199};
200
201static struct tps_info tps65911_regs[] = {
202 {
203 .name = "VIO",
204 .min_uV = 1500000,
205 .max_uV = 3300000,
206 .table_len = ARRAY_SIZE(VIO_VSEL_table),
207 .table = VIO_VSEL_table,
208 },
209 {
210 .name = "VDD1",
211 .min_uV = 600000,
212 .max_uV = 4500000,
213 },
214 {
215 .name = "VDD2",
216 .min_uV = 600000,
217 .max_uV = 4500000,
218 },
219 {
220 .name = "VDDCTRL",
221 .min_uV = 600000,
222 .max_uV = 1400000,
223 },
224 {
225 .name = "LDO1",
226 .min_uV = 1000000,
227 .max_uV = 3300000,
228 },
229 {
230 .name = "LDO2",
231 .min_uV = 1000000,
232 .max_uV = 3300000,
233 },
234 {
235 .name = "LDO3",
236 .min_uV = 1000000,
237 .max_uV = 3300000,
238 },
239 {
240 .name = "LDO4",
241 .min_uV = 1000000,
242 .max_uV = 3300000,
243 },
244 {
245 .name = "LDO5",
246 .min_uV = 1000000,
247 .max_uV = 3300000,
248 },
249 {
250 .name = "LDO6",
251 .min_uV = 1000000,
252 .max_uV = 3300000,
253 },
254 {
255 .name = "LDO7",
256 .min_uV = 1000000,
257 .max_uV = 3300000,
258 },
259 {
260 .name = "LDO8",
261 .min_uV = 1000000,
262 .max_uV = 3300000,
263 },
264};
265
266struct tps65910_reg {
267 struct regulator_desc desc[TPS65910_NUM_REGULATOR];
268 struct tps65910 *mfd;
269 struct regulator_dev *rdev[TPS65910_NUM_REGULATOR];
270 struct tps_info *info[TPS65910_NUM_REGULATOR];
271 struct mutex mutex;
272 int mode;
273 int (*get_ctrl_reg)(int);
274};
275
276static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
277{
278 u8 val;
279 int err;
280
281 err = pmic->mfd->read(pmic->mfd, reg, 1, &val);
282 if (err)
283 return err;
284
285 return val;
286}
287
288static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
289{
290 return pmic->mfd->write(pmic->mfd, reg, 1, &val);
291}
292
293static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
294 u8 set_mask, u8 clear_mask)
295{
296 int err, data;
297
298 mutex_lock(&pmic->mutex);
299
300 data = tps65910_read(pmic, reg);
301 if (data < 0) {
302 dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
303 err = data;
304 goto out;
305 }
306
307 data &= ~clear_mask;
308 data |= set_mask;
309 err = tps65910_write(pmic, reg, data);
310 if (err)
311 dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
312
313out:
314 mutex_unlock(&pmic->mutex);
315 return err;
316}
317
318static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
319{
320 int data;
321
322 mutex_lock(&pmic->mutex);
323
324 data = tps65910_read(pmic, reg);
325 if (data < 0)
326 dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
327
328 mutex_unlock(&pmic->mutex);
329 return data;
330}
331
332static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val)
333{
334 int err;
335
336 mutex_lock(&pmic->mutex);
337
338 err = tps65910_write(pmic, reg, val);
339 if (err < 0)
340 dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
341
342 mutex_unlock(&pmic->mutex);
343 return err;
344}
345
346static int tps65910_get_ctrl_register(int id)
347{
348 switch (id) {
349 case TPS65910_REG_VRTC:
350 return TPS65910_VRTC;
351 case TPS65910_REG_VIO:
352 return TPS65910_VIO;
353 case TPS65910_REG_VDD1:
354 return TPS65910_VDD1;
355 case TPS65910_REG_VDD2:
356 return TPS65910_VDD2;
357 case TPS65910_REG_VDD3:
358 return TPS65910_VDD3;
359 case TPS65910_REG_VDIG1:
360 return TPS65910_VDIG1;
361 case TPS65910_REG_VDIG2:
362 return TPS65910_VDIG2;
363 case TPS65910_REG_VPLL:
364 return TPS65910_VPLL;
365 case TPS65910_REG_VDAC:
366 return TPS65910_VDAC;
367 case TPS65910_REG_VAUX1:
368 return TPS65910_VAUX1;
369 case TPS65910_REG_VAUX2:
370 return TPS65910_VAUX2;
371 case TPS65910_REG_VAUX33:
372 return TPS65910_VAUX33;
373 case TPS65910_REG_VMMC:
374 return TPS65910_VMMC;
375 default:
376 return -EINVAL;
377 }
378}
379
380static int tps65911_get_ctrl_register(int id)
381{
382 switch (id) {
383 case TPS65910_REG_VRTC:
384 return TPS65910_VRTC;
385 case TPS65910_REG_VIO:
386 return TPS65910_VIO;
387 case TPS65910_REG_VDD1:
388 return TPS65910_VDD1;
389 case TPS65910_REG_VDD2:
390 return TPS65910_VDD2;
391 case TPS65911_REG_VDDCTRL:
392 return TPS65911_VDDCTRL;
393 case TPS65911_REG_LDO1:
394 return TPS65911_LDO1;
395 case TPS65911_REG_LDO2:
396 return TPS65911_LDO2;
397 case TPS65911_REG_LDO3:
398 return TPS65911_LDO3;
399 case TPS65911_REG_LDO4:
400 return TPS65911_LDO4;
401 case TPS65911_REG_LDO5:
402 return TPS65911_LDO5;
403 case TPS65911_REG_LDO6:
404 return TPS65911_LDO6;
405 case TPS65911_REG_LDO7:
406 return TPS65911_LDO7;
407 case TPS65911_REG_LDO8:
408 return TPS65911_LDO8;
409 default:
410 return -EINVAL;
411 }
412}
413
414static int tps65910_is_enabled(struct regulator_dev *dev)
415{
416 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
417 int reg, value, id = rdev_get_id(dev);
418
419 reg = pmic->get_ctrl_reg(id);
420 if (reg < 0)
421 return reg;
422
423 value = tps65910_reg_read(pmic, reg);
424 if (value < 0)
425 return value;
426
427 return value & TPS65910_SUPPLY_STATE_ENABLED;
428}
429
430static int tps65910_enable(struct regulator_dev *dev)
431{
432 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
433 struct tps65910 *mfd = pmic->mfd;
434 int reg, id = rdev_get_id(dev);
435
436 reg = pmic->get_ctrl_reg(id);
437 if (reg < 0)
438 return reg;
439
440 return tps65910_set_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
441}
442
443static int tps65910_disable(struct regulator_dev *dev)
444{
445 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
446 struct tps65910 *mfd = pmic->mfd;
447 int reg, id = rdev_get_id(dev);
448
449 reg = pmic->get_ctrl_reg(id);
450 if (reg < 0)
451 return reg;
452
453 return tps65910_clear_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
454}
455
456
457static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
458{
459 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
460 struct tps65910 *mfd = pmic->mfd;
461 int reg, value, id = rdev_get_id(dev);
462
463 reg = pmic->get_ctrl_reg(id);
464 if (reg < 0)
465 return reg;
466
467 switch (mode) {
468 case REGULATOR_MODE_NORMAL:
469 return tps65910_modify_bits(pmic, reg, LDO_ST_ON_BIT,
470 LDO_ST_MODE_BIT);
471 case REGULATOR_MODE_IDLE:
472 value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
473 return tps65910_set_bits(mfd, reg, value);
474 case REGULATOR_MODE_STANDBY:
475 return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT);
476 }
477
478 return -EINVAL;
479}
480
481static unsigned int tps65910_get_mode(struct regulator_dev *dev)
482{
483 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
484 int reg, value, id = rdev_get_id(dev);
485
486 reg = pmic->get_ctrl_reg(id);
487 if (reg < 0)
488 return reg;
489
490 value = tps65910_reg_read(pmic, reg);
491 if (value < 0)
492 return value;
493
 494	if (!(value & LDO_ST_ON_BIT))
495 return REGULATOR_MODE_STANDBY;
496 else if (value & LDO_ST_MODE_BIT)
497 return REGULATOR_MODE_IDLE;
498 else
499 return REGULATOR_MODE_NORMAL;
500}
501
502static int tps65910_get_voltage_dcdc(struct regulator_dev *dev)
503{
504 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
505 int id = rdev_get_id(dev), voltage = 0;
506 int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0;
507
508 switch (id) {
509 case TPS65910_REG_VDD1:
510 opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
511 mult = tps65910_reg_read(pmic, TPS65910_VDD1);
512 mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
513 srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
514 sr = opvsel & VDD1_OP_CMD_MASK;
515 opvsel &= VDD1_OP_SEL_MASK;
516 srvsel &= VDD1_SR_SEL_MASK;
517 vselmax = 75;
518 break;
519 case TPS65910_REG_VDD2:
520 opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP);
521 mult = tps65910_reg_read(pmic, TPS65910_VDD2);
522 mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
523 srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR);
524 sr = opvsel & VDD2_OP_CMD_MASK;
525 opvsel &= VDD2_OP_SEL_MASK;
526 srvsel &= VDD2_SR_SEL_MASK;
527 vselmax = 75;
528 break;
529 case TPS65911_REG_VDDCTRL:
530 opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP);
531 srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR);
532 sr = opvsel & VDDCTRL_OP_CMD_MASK;
533 opvsel &= VDDCTRL_OP_SEL_MASK;
534 srvsel &= VDDCTRL_SR_SEL_MASK;
535 vselmax = 64;
536 break;
537 }
538
 539	/* a multiplier field of 0 also means x1; values 2 and 3 are used as-is */
 540	if (!mult)
 541		mult = 1;
542
543 if (sr) {
544 /* normalise to valid range */
545 if (srvsel < 3)
546 srvsel = 3;
547 if (srvsel > vselmax)
548 srvsel = vselmax;
549 srvsel -= 3;
550
551 voltage = (srvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
552 } else {
553
 554		/* normalise to valid range */
555 if (opvsel < 3)
556 opvsel = 3;
557 if (opvsel > vselmax)
558 opvsel = vselmax;
559 opvsel -= 3;
560
561 voltage = (opvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
562 }
563
564 voltage *= mult;
565
566 return voltage;
567}
568
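
tps65910_get_voltage_dcdc() turns the raw VDD1/VDD2 fields back into microvolts: the selector is clamped to 3..vselmax and rebased to zero, mapped to 100 * (sel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT), and finally scaled by the VGAIN multiplier. Assuming the MFD header values VDD1_2_MIN_VOLT = 6000 and VDD1_2_OFFSET = 125 (a 600 mV base and 12.5 mV steps expressed in 100 uV units; treat those constants as an assumption of this sketch), a quick check of the arithmetic:

/* Worked example of the VDD1/VDD2 math in tps65910_get_voltage_dcdc().
 * VDD1_2_MIN_VOLT and VDD1_2_OFFSET values are assumptions, see lead-in. */
#include <stdio.h>

#define VDD1_2_MIN_VOLT	6000	/* 600.0 mV in 100 uV units */
#define VDD1_2_OFFSET	125	/* 12.5 mV step in 100 uV units */

int main(void)
{
	int vsel = 40;		/* register value, before rebasing */
	int mult = 1;		/* VGAIN multiplier: 1, 2 or 3 */
	int sel = vsel - 3;	/* values below 3 are clamped, then rebased */

	int uV = (sel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100 * mult;

	/* (37 * 125 + 6000) * 100 = 1062500 uV, i.e. about 1.0625 V */
	printf("vsel=%d mult=%d -> %d uV\n", vsel, mult, uV);
	return 0;
}
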
569static int tps65910_get_voltage(struct regulator_dev *dev)
570{
571 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
572 int reg, value, id = rdev_get_id(dev), voltage = 0;
573
574 reg = pmic->get_ctrl_reg(id);
575 if (reg < 0)
576 return reg;
577
578 value = tps65910_reg_read(pmic, reg);
579 if (value < 0)
580 return value;
581
582 switch (id) {
583 case TPS65910_REG_VIO:
584 case TPS65910_REG_VDIG1:
585 case TPS65910_REG_VDIG2:
586 case TPS65910_REG_VPLL:
587 case TPS65910_REG_VDAC:
588 case TPS65910_REG_VAUX1:
589 case TPS65910_REG_VAUX2:
590 case TPS65910_REG_VAUX33:
591 case TPS65910_REG_VMMC:
592 value &= LDO_SEL_MASK;
593 value >>= LDO_SEL_SHIFT;
594 break;
595 default:
596 return -EINVAL;
597 }
598
599 voltage = pmic->info[id]->table[value] * 1000;
600
601 return voltage;
602}
603
604static int tps65910_get_voltage_vdd3(struct regulator_dev *dev)
605{
606 return 5 * 1000 * 1000;
607}
608
609static int tps65911_get_voltage(struct regulator_dev *dev)
610{
611 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
612 int step_mv, id = rdev_get_id(dev);
613 u8 value, reg;
614
615 reg = pmic->get_ctrl_reg(id);
616
617 value = tps65910_reg_read(pmic, reg);
618
619 switch (id) {
620 case TPS65911_REG_LDO1:
621 case TPS65911_REG_LDO2:
622 case TPS65911_REG_LDO4:
623 value &= LDO1_SEL_MASK;
624 value >>= LDO_SEL_SHIFT;
625 /* The first 5 values of the selector correspond to 1V */
626 if (value < 5)
627 value = 0;
628 else
629 value -= 4;
630
631 step_mv = 50;
632 break;
633 case TPS65911_REG_LDO3:
634 case TPS65911_REG_LDO5:
635 case TPS65911_REG_LDO6:
636 case TPS65911_REG_LDO7:
637 case TPS65911_REG_LDO8:
638 value &= LDO3_SEL_MASK;
639 value >>= LDO_SEL_SHIFT;
640 /* The first 3 values of the selector correspond to 1V */
641 if (value < 3)
642 value = 0;
643 else
644 value -= 2;
645
646 step_mv = 100;
647 break;
648 case TPS65910_REG_VIO:
649 return pmic->info[id]->table[value] * 1000;
651 default:
652 return -EINVAL;
653 }
654
655 return (LDO_MIN_VOLT + value * step_mv) * 1000;
656}
657
658static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
659 unsigned selector)
660{
661 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
662 int id = rdev_get_id(dev), vsel;
663 int dcdc_mult = 0;
664
665 switch (id) {
666 case TPS65910_REG_VDD1:
667 dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
668 if (dcdc_mult == 1)
669 dcdc_mult--;
670 vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
671
672 tps65910_modify_bits(pmic, TPS65910_VDD1,
673 (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
674 VDD1_VGAIN_SEL_MASK);
675 tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
676 break;
677 case TPS65910_REG_VDD2:
678 dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
679 if (dcdc_mult == 1)
680 dcdc_mult--;
681 vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
682
683 tps65910_modify_bits(pmic, TPS65910_VDD2,
684 (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
685 VDD1_VGAIN_SEL_MASK);
686 tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
687 break;
688 case TPS65911_REG_VDDCTRL:
689 vsel = selector;
690 tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
691 }
692
693 return 0;
694}
695
696static int tps65910_set_voltage(struct regulator_dev *dev, unsigned selector)
697{
698 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
699 int reg, id = rdev_get_id(dev);
700
701 reg = pmic->get_ctrl_reg(id);
702 if (reg < 0)
703 return reg;
704
705 switch (id) {
706 case TPS65910_REG_VIO:
707 case TPS65910_REG_VDIG1:
708 case TPS65910_REG_VDIG2:
709 case TPS65910_REG_VPLL:
710 case TPS65910_REG_VDAC:
711 case TPS65910_REG_VAUX1:
712 case TPS65910_REG_VAUX2:
713 case TPS65910_REG_VAUX33:
714 case TPS65910_REG_VMMC:
715 return tps65910_modify_bits(pmic, reg,
716 (selector << LDO_SEL_SHIFT), LDO_SEL_MASK);
717 }
718
719 return -EINVAL;
720}
721
722static int tps65911_set_voltage(struct regulator_dev *dev, unsigned selector)
723{
724 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
725 int reg, id = rdev_get_id(dev);
726
727 reg = pmic->get_ctrl_reg(id);
728 if (reg < 0)
729 return reg;
730
731 switch (id) {
732 case TPS65911_REG_LDO1:
733 case TPS65911_REG_LDO2:
734 case TPS65911_REG_LDO4:
735 return tps65910_modify_bits(pmic, reg,
736 (selector << LDO_SEL_SHIFT), LDO1_SEL_MASK);
737 case TPS65911_REG_LDO3:
738 case TPS65911_REG_LDO5:
739 case TPS65911_REG_LDO6:
740 case TPS65911_REG_LDO7:
741 case TPS65911_REG_LDO8:
742 case TPS65910_REG_VIO:
743 return tps65910_modify_bits(pmic, reg,
744 (selector << LDO_SEL_SHIFT), LDO3_SEL_MASK);
745 }
746
747 return -EINVAL;
748}
749
750
751static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
752 unsigned selector)
753{
754 int volt, mult = 1, id = rdev_get_id(dev);
755
756 switch (id) {
757 case TPS65910_REG_VDD1:
758 case TPS65910_REG_VDD2:
759 mult = (selector / VDD1_2_NUM_VOLTS) + 1;
760 volt = VDD1_2_MIN_VOLT +
 761			(selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
		break;
 762	case TPS65911_REG_VDDCTRL:
763 volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
764 }
765
766 return volt * 100 * mult;
767}
768
769static int tps65910_list_voltage(struct regulator_dev *dev,
770 unsigned selector)
771{
772 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
773 int id = rdev_get_id(dev), voltage;
774
775 if (id < TPS65910_REG_VIO || id > TPS65910_REG_VMMC)
776 return -EINVAL;
777
778 if (selector >= pmic->info[id]->table_len)
779 return -EINVAL;
780 else
781 voltage = pmic->info[id]->table[selector] * 1000;
782
783 return voltage;
784}
785
786static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
787{
788 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
789 int step_mv = 0, id = rdev_get_id(dev);
790
791 switch(id) {
792 case TPS65911_REG_LDO1:
793 case TPS65911_REG_LDO2:
794 case TPS65911_REG_LDO4:
795 /* The first 5 values of the selector correspond to 1V */
796 if (selector < 5)
797 selector = 0;
798 else
799 selector -= 4;
800
801 step_mv = 50;
802 break;
803 case TPS65911_REG_LDO3:
804 case TPS65911_REG_LDO5:
805 case TPS65911_REG_LDO6:
806 case TPS65911_REG_LDO7:
807 case TPS65911_REG_LDO8:
808 /* The first 3 values of the selector correspond to 1V */
809 if (selector < 3)
810 selector = 0;
811 else
812 selector -= 2;
813
814 step_mv = 100;
815 break;
816 case TPS65910_REG_VIO:
817 return pmic->info[id]->table[selector] * 1000;
818 default:
819 return -EINVAL;
820 }
821
822 return (LDO_MIN_VOLT + selector * step_mv) * 1000;
823}
824
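
For the TPS65911 LDOs the selector is not a straight table index: the first few codes all mean the 1.0 V minimum, after which LDO1/2/4 step in 50 mV and LDO3/5/6/7/8 in 100 mV increments. A stand-alone version of that mapping, assuming LDO_MIN_VOLT is 1000 (mV), which matches the 1.0 V minimum in the info table:

/* Demo of the TPS65911 LDO selector-to-voltage mapping used above.
 * LDO_MIN_VOLT = 1000 (mV) is an assumption based on the 1.0 V minimum. */
#include <stdio.h>

#define LDO_MIN_VOLT 1000	/* mV */

/* LDO1/LDO2/LDO4 flavour: first 5 codes are 1.0 V, then 50 mV steps */
static int ldo1_uV(unsigned selector)
{
	if (selector < 5)
		selector = 0;
	else
		selector -= 4;
	return (LDO_MIN_VOLT + selector * 50) * 1000;
}

int main(void)
{
	/* selector 10 -> 10 - 4 = 6 steps -> 1000 + 300 = 1300 mV */
	printf("LDO1 selector 10 -> %d uV\n", ldo1_uV(10));
	/* selector 3 is still on the 1.0 V plateau */
	printf("LDO1 selector 3  -> %d uV\n", ldo1_uV(3));
	return 0;
}
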
825/* Regulator ops (except VRTC) */
826static struct regulator_ops tps65910_ops_dcdc = {
827 .is_enabled = tps65910_is_enabled,
828 .enable = tps65910_enable,
829 .disable = tps65910_disable,
830 .set_mode = tps65910_set_mode,
831 .get_mode = tps65910_get_mode,
832 .get_voltage = tps65910_get_voltage_dcdc,
833 .set_voltage_sel = tps65910_set_voltage_dcdc,
834 .list_voltage = tps65910_list_voltage_dcdc,
835};
836
837static struct regulator_ops tps65910_ops_vdd3 = {
838 .is_enabled = tps65910_is_enabled,
839 .enable = tps65910_enable,
840 .disable = tps65910_disable,
841 .set_mode = tps65910_set_mode,
842 .get_mode = tps65910_get_mode,
843 .get_voltage = tps65910_get_voltage_vdd3,
844 .list_voltage = tps65910_list_voltage,
845};
846
847static struct regulator_ops tps65910_ops = {
848 .is_enabled = tps65910_is_enabled,
849 .enable = tps65910_enable,
850 .disable = tps65910_disable,
851 .set_mode = tps65910_set_mode,
852 .get_mode = tps65910_get_mode,
853 .get_voltage = tps65910_get_voltage,
854 .set_voltage_sel = tps65910_set_voltage,
855 .list_voltage = tps65910_list_voltage,
856};
857
858static struct regulator_ops tps65911_ops = {
859 .is_enabled = tps65910_is_enabled,
860 .enable = tps65910_enable,
861 .disable = tps65910_disable,
862 .set_mode = tps65910_set_mode,
863 .get_mode = tps65910_get_mode,
864 .get_voltage = tps65911_get_voltage,
865 .set_voltage_sel = tps65911_set_voltage,
866 .list_voltage = tps65911_list_voltage,
867};
868
869static __devinit int tps65910_probe(struct platform_device *pdev)
870{
871 struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
872 struct tps_info *info;
873 struct regulator_init_data *reg_data;
874 struct regulator_dev *rdev;
875 struct tps65910_reg *pmic;
876 struct tps65910_board *pmic_plat_data;
877 int i, err;
878
879 pmic_plat_data = dev_get_platdata(tps65910->dev);
880 if (!pmic_plat_data)
881 return -EINVAL;
882
883 reg_data = pmic_plat_data->tps65910_pmic_init_data;
884
885 pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
886 if (!pmic)
887 return -ENOMEM;
888
889 mutex_init(&pmic->mutex);
890 pmic->mfd = tps65910;
891 platform_set_drvdata(pdev, pmic);
892
 893	/* Give control of all registers to the control port */
894 tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL,
895 DEVCTRL_SR_CTL_I2C_SEL_MASK);
896
897 switch(tps65910_chip_id(tps65910)) {
898 case TPS65910:
899 pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
 900		info = tps65910_regs;
		break;
 901	case TPS65911:
 902		pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
 903		info = tps65911_regs;
		break;
 904	default:
 905		pr_err("Invalid tps chip version\n");
		kfree(pmic);
 906		return -ENODEV;
907 }
908
909 for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) {
910 /* Register the regulators */
911 pmic->info[i] = info;
912
913 pmic->desc[i].name = info->name;
914 pmic->desc[i].id = i;
915 pmic->desc[i].n_voltages = info->table_len;
916
917 if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
918 pmic->desc[i].ops = &tps65910_ops_dcdc;
919 } else if (i == TPS65910_REG_VDD3) {
920 if (tps65910_chip_id(tps65910) == TPS65910)
921 pmic->desc[i].ops = &tps65910_ops_vdd3;
922 else
923 pmic->desc[i].ops = &tps65910_ops_dcdc;
924 } else {
925 if (tps65910_chip_id(tps65910) == TPS65910)
926 pmic->desc[i].ops = &tps65910_ops;
927 else
928 pmic->desc[i].ops = &tps65911_ops;
929 }
930
931 pmic->desc[i].type = REGULATOR_VOLTAGE;
932 pmic->desc[i].owner = THIS_MODULE;
933
934 rdev = regulator_register(&pmic->desc[i],
935 tps65910->dev, reg_data, pmic);
936 if (IS_ERR(rdev)) {
937 dev_err(tps65910->dev,
938 "failed to register %s regulator\n",
939 pdev->name);
940 err = PTR_ERR(rdev);
941 goto err;
942 }
943
944 /* Save regulator for cleanup */
945 pmic->rdev[i] = rdev;
946 }
947 return 0;
948
949err:
950 while (--i >= 0)
951 regulator_unregister(pmic->rdev[i]);
952
953 kfree(pmic);
954 return err;
955}
956
957static int __devexit tps65910_remove(struct platform_device *pdev)
958{
959 struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev);
960 int i;
961
962 for (i = 0; i < TPS65910_NUM_REGULATOR; i++)
963 regulator_unregister(tps65910_reg->rdev[i]);
964
965 kfree(tps65910_reg);
966 return 0;
967}
968
969static struct platform_driver tps65910_driver = {
970 .driver = {
971 .name = "tps65910-pmic",
972 .owner = THIS_MODULE,
973 },
974 .probe = tps65910_probe,
975 .remove = __devexit_p(tps65910_remove),
976};
977
978static int __init tps65910_init(void)
979{
980 return platform_driver_register(&tps65910_driver);
981}
982subsys_initcall(tps65910_init);
983
984static void __exit tps65910_cleanup(void)
985{
986 platform_driver_unregister(&tps65910_driver);
987}
988module_exit(tps65910_cleanup);
989
990MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
 991MODULE_DESCRIPTION("TPS65910 voltage regulator driver");
992MODULE_LICENSE("GPL v2");
993MODULE_ALIAS("platform:tps65910-pmic");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 6a292852a358..87fe0f75a56e 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -51,8 +51,13 @@ struct twlreg_info {
51 u16 min_mV; 51 u16 min_mV;
52 u16 max_mV; 52 u16 max_mV;
53 53
54 u8 flags;
55
54 /* used by regulator core */ 56 /* used by regulator core */
55 struct regulator_desc desc; 57 struct regulator_desc desc;
58
59 /* chip specific features */
60 unsigned long features;
56}; 61};
57 62
58 63
@@ -70,12 +75,35 @@ struct twlreg_info {
70#define VREG_TRANS 1 75#define VREG_TRANS 1
71#define VREG_STATE 2 76#define VREG_STATE 2
72#define VREG_VOLTAGE 3 77#define VREG_VOLTAGE 3
78#define VREG_VOLTAGE_SMPS 4
73/* TWL6030 Misc register offsets */ 79/* TWL6030 Misc register offsets */
74#define VREG_BC_ALL 1 80#define VREG_BC_ALL 1
75#define VREG_BC_REF 2 81#define VREG_BC_REF 2
76#define VREG_BC_PROC 3 82#define VREG_BC_PROC 3
77#define VREG_BC_CLK_RST 4 83#define VREG_BC_CLK_RST 4
78 84
85/* TWL6030 LDO register values for CFG_STATE */
86#define TWL6030_CFG_STATE_OFF 0x00
87#define TWL6030_CFG_STATE_ON 0x01
88#define TWL6030_CFG_STATE_OFF2 0x02
89#define TWL6030_CFG_STATE_SLEEP 0x03
90#define TWL6030_CFG_STATE_GRP_SHIFT 5
91#define TWL6030_CFG_STATE_APP_SHIFT 2
92#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
93#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
94 TWL6030_CFG_STATE_APP_SHIFT)
95
96/* Flags for SMPS Voltage reading */
97#define SMPS_OFFSET_EN BIT(0)
98#define SMPS_EXTENDED_EN BIT(1)
99
100/* twl6025 SMPS EPROM values */
101#define TWL6030_SMPS_OFFSET 0xB0
102#define TWL6030_SMPS_MULT 0xB3
103#define SMPS_MULTOFFSET_SMPS4 BIT(0)
104#define SMPS_MULTOFFSET_VIO BIT(1)
105#define SMPS_MULTOFFSET_SMPS3 BIT(6)
106
79static inline int 107static inline int
80twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset) 108twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
81{ 109{
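
The new TWL6030_CFG_STATE_* macros all describe one register: the resource group is written in bits 7:5, the requested state goes in the low bits (ON = 0x01, OFF = 0x00, SLEEP = 0x03), and the state the hardware actually applied is read back from bits 3:2 via TWL6030_CFG_STATE_APP(). A small encode/decode sketch of that layout; the layout itself is taken from the macros above, while the sample readback value is made up:

/* Demo of the TWL6030 CFG_STATE encoding used by the twl6030reg_* helpers. */
#include <stdio.h>

#define TWL6030_CFG_STATE_ON		0x01
#define TWL6030_CFG_STATE_GRP_SHIFT	5
#define TWL6030_CFG_STATE_APP_SHIFT	2
#define TWL6030_CFG_STATE_APP_MASK	(0x03 << TWL6030_CFG_STATE_APP_SHIFT)
#define TWL6030_CFG_STATE_APP(v)	(((v) & TWL6030_CFG_STATE_APP_MASK) >> \
					 TWL6030_CFG_STATE_APP_SHIFT)
#define P1_GRP_6030			0x01	/* application processor group */

int main(void)
{
	/* value a driver would write to turn the resource on for group P1 */
	unsigned char enable = (P1_GRP_6030 << TWL6030_CFG_STATE_GRP_SHIFT) |
			       TWL6030_CFG_STATE_ON;

	/* made-up readback: group P1 in bits 7:5, applied state ON in bits 3:2 */
	unsigned char readback = 0x24 | TWL6030_CFG_STATE_ON;

	/* prints: write 0x21, readback applied state = 0x1 (ON) */
	printf("write 0x%02x, readback applied state = 0x%x\n",
	       enable, TWL6030_CFG_STATE_APP(readback));
	return 0;
}
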
@@ -118,21 +146,38 @@ static int twlreg_grp(struct regulator_dev *rdev)
118#define P2_GRP_6030 BIT(1) /* "peripherals" */ 146#define P2_GRP_6030 BIT(1) /* "peripherals" */
119#define P1_GRP_6030 BIT(0) /* CPU/Linux */ 147#define P1_GRP_6030 BIT(0) /* CPU/Linux */
120 148
121static int twlreg_is_enabled(struct regulator_dev *rdev) 149static int twl4030reg_is_enabled(struct regulator_dev *rdev)
122{ 150{
123 int state = twlreg_grp(rdev); 151 int state = twlreg_grp(rdev);
124 152
125 if (state < 0) 153 if (state < 0)
126 return state; 154 return state;
127 155
128 if (twl_class_is_4030()) 156 return state & P1_GRP_4030;
129 state &= P1_GRP_4030; 157}
158
159static int twl6030reg_is_enabled(struct regulator_dev *rdev)
160{
161 struct twlreg_info *info = rdev_get_drvdata(rdev);
162 int grp = 0, val;
163
164 if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
165 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
166 if (grp < 0)
167 return grp;
168
169 if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
170 grp &= P1_GRP_6030;
130 else 171 else
131 state &= P1_GRP_6030; 172 grp = 1;
132 return state; 173
174 val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
175 val = TWL6030_CFG_STATE_APP(val);
176
177 return grp && (val == TWL6030_CFG_STATE_ON);
133} 178}
134 179
135static int twlreg_enable(struct regulator_dev *rdev) 180static int twl4030reg_enable(struct regulator_dev *rdev)
136{ 181{
137 struct twlreg_info *info = rdev_get_drvdata(rdev); 182 struct twlreg_info *info = rdev_get_drvdata(rdev);
138 int grp; 183 int grp;
@@ -142,10 +187,7 @@ static int twlreg_enable(struct regulator_dev *rdev)
142 if (grp < 0) 187 if (grp < 0)
143 return grp; 188 return grp;
144 189
145 if (twl_class_is_4030()) 190 grp |= P1_GRP_4030;
146 grp |= P1_GRP_4030;
147 else
148 grp |= P1_GRP_6030;
149 191
150 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); 192 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
151 193
@@ -154,29 +196,63 @@ static int twlreg_enable(struct regulator_dev *rdev)
154 return ret; 196 return ret;
155} 197}
156 198
157static int twlreg_disable(struct regulator_dev *rdev) 199static int twl6030reg_enable(struct regulator_dev *rdev)
200{
201 struct twlreg_info *info = rdev_get_drvdata(rdev);
202 int grp = 0;
203 int ret;
204
205 if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
206 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
207 if (grp < 0)
208 return grp;
209
210 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
211 grp << TWL6030_CFG_STATE_GRP_SHIFT |
212 TWL6030_CFG_STATE_ON);
213
214 udelay(info->delay);
215
216 return ret;
217}
218
219static int twl4030reg_disable(struct regulator_dev *rdev)
158{ 220{
159 struct twlreg_info *info = rdev_get_drvdata(rdev); 221 struct twlreg_info *info = rdev_get_drvdata(rdev);
160 int grp; 222 int grp;
223 int ret;
161 224
162 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); 225 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
163 if (grp < 0) 226 if (grp < 0)
164 return grp; 227 return grp;
165 228
166 if (twl_class_is_4030()) 229 grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
167 grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
168 else
169 grp &= ~(P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030);
170 230
171 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); 231 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
232
233 return ret;
172} 234}
173 235
174static int twlreg_get_status(struct regulator_dev *rdev) 236static int twl6030reg_disable(struct regulator_dev *rdev)
175{ 237{
176 int state = twlreg_grp(rdev); 238 struct twlreg_info *info = rdev_get_drvdata(rdev);
239 int grp = 0;
240 int ret;
241
242 if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
243 grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030;
244
245 /* For 6030, set the off state for all grps enabled */
246 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
247 (grp) << TWL6030_CFG_STATE_GRP_SHIFT |
248 TWL6030_CFG_STATE_OFF);
249
250 return ret;
251}
177 252
178 if (twl_class_is_6030()) 253static int twl4030reg_get_status(struct regulator_dev *rdev)
179 return 0; /* FIXME return for 6030 regulator */ 254{
255 int state = twlreg_grp(rdev);
180 256
181 if (state < 0) 257 if (state < 0)
182 return state; 258 return state;
@@ -190,15 +266,39 @@ static int twlreg_get_status(struct regulator_dev *rdev)
190 : REGULATOR_STATUS_STANDBY; 266 : REGULATOR_STATUS_STANDBY;
191} 267}
192 268
193static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode) 269static int twl6030reg_get_status(struct regulator_dev *rdev)
270{
271 struct twlreg_info *info = rdev_get_drvdata(rdev);
272 int val;
273
274 val = twlreg_grp(rdev);
275 if (val < 0)
276 return val;
277
278 val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
279
280 switch (TWL6030_CFG_STATE_APP(val)) {
281 case TWL6030_CFG_STATE_ON:
282 return REGULATOR_STATUS_NORMAL;
283
284 case TWL6030_CFG_STATE_SLEEP:
285 return REGULATOR_STATUS_STANDBY;
286
287 case TWL6030_CFG_STATE_OFF:
288 case TWL6030_CFG_STATE_OFF2:
289 default:
290 break;
291 }
292
293 return REGULATOR_STATUS_OFF;
294}
295
296static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
194{ 297{
195 struct twlreg_info *info = rdev_get_drvdata(rdev); 298 struct twlreg_info *info = rdev_get_drvdata(rdev);
196 unsigned message; 299 unsigned message;
197 int status; 300 int status;
198 301
199 if (twl_class_is_6030())
200 return 0; /* FIXME return for 6030 regulator */
201
202 /* We can only set the mode through state machine commands... */ 302 /* We can only set the mode through state machine commands... */
203 switch (mode) { 303 switch (mode) {
204 case REGULATOR_MODE_NORMAL: 304 case REGULATOR_MODE_NORMAL:
@@ -227,6 +327,36 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
227 message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB); 327 message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
228} 328}
229 329
330static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
331{
332 struct twlreg_info *info = rdev_get_drvdata(rdev);
333 int grp = 0;
334 int val;
335
336 if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
337 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
338
339 if (grp < 0)
340 return grp;
341
342 /* Compose the state register settings */
343 val = grp << TWL6030_CFG_STATE_GRP_SHIFT;
344 /* We can only set the mode through state machine commands... */
345 switch (mode) {
346 case REGULATOR_MODE_NORMAL:
347 val |= TWL6030_CFG_STATE_ON;
348 break;
349 case REGULATOR_MODE_STANDBY:
350 val |= TWL6030_CFG_STATE_SLEEP;
351 break;
352
353 default:
354 return -EINVAL;
355 }
356
357 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val);
358}
359
230/*----------------------------------------------------------------------*/ 360/*----------------------------------------------------------------------*/
231 361
232/* 362/*
@@ -375,13 +505,13 @@ static struct regulator_ops twl4030ldo_ops = {
375 .set_voltage = twl4030ldo_set_voltage, 505 .set_voltage = twl4030ldo_set_voltage,
376 .get_voltage = twl4030ldo_get_voltage, 506 .get_voltage = twl4030ldo_get_voltage,
377 507
378 .enable = twlreg_enable, 508 .enable = twl4030reg_enable,
379 .disable = twlreg_disable, 509 .disable = twl4030reg_disable,
380 .is_enabled = twlreg_is_enabled, 510 .is_enabled = twl4030reg_is_enabled,
381 511
382 .set_mode = twlreg_set_mode, 512 .set_mode = twl4030reg_set_mode,
383 513
384 .get_status = twlreg_get_status, 514 .get_status = twl4030reg_get_status,
385}; 515};
386 516
387static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) 517static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
@@ -433,13 +563,13 @@ static struct regulator_ops twl6030ldo_ops = {
433 .set_voltage = twl6030ldo_set_voltage, 563 .set_voltage = twl6030ldo_set_voltage,
434 .get_voltage = twl6030ldo_get_voltage, 564 .get_voltage = twl6030ldo_get_voltage,
435 565
436 .enable = twlreg_enable, 566 .enable = twl6030reg_enable,
437 .disable = twlreg_disable, 567 .disable = twl6030reg_disable,
438 .is_enabled = twlreg_is_enabled, 568 .is_enabled = twl6030reg_is_enabled,
439 569
440 .set_mode = twlreg_set_mode, 570 .set_mode = twl6030reg_set_mode,
441 571
442 .get_status = twlreg_get_status, 572 .get_status = twl6030reg_get_status,
443}; 573};
444 574
445/*----------------------------------------------------------------------*/ 575/*----------------------------------------------------------------------*/
@@ -461,25 +591,242 @@ static int twlfixed_get_voltage(struct regulator_dev *rdev)
461 return info->min_mV * 1000; 591 return info->min_mV * 1000;
462} 592}
463 593
464static struct regulator_ops twlfixed_ops = { 594static struct regulator_ops twl4030fixed_ops = {
595 .list_voltage = twlfixed_list_voltage,
596
597 .get_voltage = twlfixed_get_voltage,
598
599 .enable = twl4030reg_enable,
600 .disable = twl4030reg_disable,
601 .is_enabled = twl4030reg_is_enabled,
602
603 .set_mode = twl4030reg_set_mode,
604
605 .get_status = twl4030reg_get_status,
606};
607
608static struct regulator_ops twl6030fixed_ops = {
465 .list_voltage = twlfixed_list_voltage, 609 .list_voltage = twlfixed_list_voltage,
466 610
467 .get_voltage = twlfixed_get_voltage, 611 .get_voltage = twlfixed_get_voltage,
468 612
469 .enable = twlreg_enable, 613 .enable = twl6030reg_enable,
470 .disable = twlreg_disable, 614 .disable = twl6030reg_disable,
471 .is_enabled = twlreg_is_enabled, 615 .is_enabled = twl6030reg_is_enabled,
472 616
473 .set_mode = twlreg_set_mode, 617 .set_mode = twl6030reg_set_mode,
474 618
475 .get_status = twlreg_get_status, 619 .get_status = twl6030reg_get_status,
476}; 620};
477 621
478static struct regulator_ops twl6030_fixed_resource = { 622static struct regulator_ops twl6030_fixed_resource = {
479 .enable = twlreg_enable, 623 .enable = twl6030reg_enable,
480 .disable = twlreg_disable, 624 .disable = twl6030reg_disable,
481 .is_enabled = twlreg_is_enabled, 625 .is_enabled = twl6030reg_is_enabled,
482 .get_status = twlreg_get_status, 626 .get_status = twl6030reg_get_status,
627};
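For readers following the new 6030 state handling above, here is a minimal, illustrative user-space check (not part of the patch) of the TWL6030_CFG_STATE_APP() arithmetic: the group bits sit above bit 5 and the applied-state field occupies bits 2..3, so extracting it is a plain mask-and-shift. The 0x7 group mask and the 0x1 state code below are example values only.

#include <stdio.h>

#define TWL6030_CFG_STATE_GRP_SHIFT	5
#define TWL6030_CFG_STATE_APP_SHIFT	2
#define TWL6030_CFG_STATE_APP_MASK	(0x03 << TWL6030_CFG_STATE_APP_SHIFT)
#define TWL6030_CFG_STATE_APP(v)	(((v) & TWL6030_CFG_STATE_APP_MASK) >> \
					 TWL6030_CFG_STATE_APP_SHIFT)

int main(void)
{
	/* example register image: groups P1..P3 above bit 5, APP field = 0x1 */
	unsigned reg = (0x7 << TWL6030_CFG_STATE_GRP_SHIFT) |
		       (0x1 << TWL6030_CFG_STATE_APP_SHIFT);

	printf("reg=0x%02x app=0x%x\n", reg, TWL6030_CFG_STATE_APP(reg));
	return 0;
}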
628
629/*
630 * SMPS status and control
631 */
632
633static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index)
634{
635 struct twlreg_info *info = rdev_get_drvdata(rdev);
636
637 int voltage = 0;
638
639 switch (info->flags) {
640 case SMPS_OFFSET_EN:
641 voltage = 100000;
642 /* fall through */
643 case 0:
644 switch (index) {
645 case 0:
646 voltage = 0;
647 break;
648 case 58:
649 voltage = 1350 * 1000;
650 break;
651 case 59:
652 voltage = 1500 * 1000;
653 break;
654 case 60:
655 voltage = 1800 * 1000;
656 break;
657 case 61:
658 voltage = 1900 * 1000;
659 break;
660 case 62:
661 voltage = 2100 * 1000;
662 break;
663 default:
664 voltage += (600000 + (12500 * (index - 1)));
665 }
666 break;
667 case SMPS_EXTENDED_EN:
668 switch (index) {
669 case 0:
670 voltage = 0;
671 break;
672 case 58:
673 voltage = 2084 * 1000;
674 break;
675 case 59:
676 voltage = 2315 * 1000;
677 break;
678 case 60:
679 voltage = 2778 * 1000;
680 break;
681 case 61:
682 voltage = 2932 * 1000;
683 break;
684 case 62:
685 voltage = 3241 * 1000;
686 break;
687 default:
688 voltage = (1852000 + (38600 * (index - 1)));
689 }
690 break;
691 case SMPS_OFFSET_EN | SMPS_EXTENDED_EN:
692 switch (index) {
693 case 0:
694 voltage = 0;
695 break;
696 case 58:
697 voltage = 4167 * 1000;
698 break;
699 case 59:
700 voltage = 2315 * 1000;
701 break;
702 case 60:
703 voltage = 2778 * 1000;
704 break;
705 case 61:
706 voltage = 2932 * 1000;
707 break;
708 case 62:
709 voltage = 3241 * 1000;
710 break;
711 default:
712 voltage = (2161000 + (38600 * (index - 1)));
713 }
714 break;
715 }
716
717 return voltage;
718}
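The table above is mostly linear: for selectors 1..57 the voltage is 600 mV plus 12.5 mV per step, shifted up by 100 mV when SMPS_OFFSET_EN is set, while 58..62 are the fixed special values handled explicitly. A small sketch of just that linear portion (illustrative, not part of the patch):

static int smps_linear_uV(unsigned index, int offset_en)
{
	int uV = offset_en ? 100000 : 0;	/* SMPS_OFFSET_EN adds 100 mV */

	if (index == 0)
		return 0;			/* selector 0 means "off" */
	return uV + 600000 + 12500 * (index - 1);
}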
719
720static int
721twl6030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
722 unsigned int *selector)
723{
724 struct twlreg_info *info = rdev_get_drvdata(rdev);
725 int vsel = 0;
726
727 switch (info->flags) {
728 case 0:
729 if (min_uV == 0)
730 vsel = 0;
731 else if ((min_uV >= 600000) && (max_uV <= 1300000)) {
732 vsel = (min_uV - 600000) / 125;
733 if (vsel % 100)
734 vsel += 100;
735 vsel /= 100;
736 vsel++;
737 }
738 /* Values 1..57 for vsel are linear and can be calculated;
739 * values 58..62 are non-linear.
740 */
741 else if ((min_uV > 1900000) && (max_uV >= 2100000))
742 vsel = 62;
743 else if ((min_uV > 1800000) && (max_uV >= 1900000))
744 vsel = 61;
745 else if ((min_uV > 1500000) && (max_uV >= 1800000))
746 vsel = 60;
747 else if ((min_uV > 1350000) && (max_uV >= 1500000))
748 vsel = 59;
749 else if ((min_uV > 1300000) && (max_uV >= 1350000))
750 vsel = 58;
751 else
752 return -EINVAL;
753 break;
754 case SMPS_OFFSET_EN:
755 if (min_uV == 0)
756 vsel = 0;
757 else if ((min_uV >= 700000) && (max_uV <= 1420000)) {
758 vsel = (min_uV - 700000) / 125;
759 if (vsel % 100)
760 vsel += 100;
761 vsel /= 100;
762 vsel++;
763 }
764 /* Values 1..57 for vsel are linear and can be calculated;
765 * values 58..62 are non-linear.
766 */
767 else if ((min_uV > 1900000) && (max_uV >= 2100000))
768 vsel = 62;
769 else if ((min_uV > 1800000) && (max_uV >= 1900000))
770 vsel = 61;
771 else if ((min_uV > 1350000) && (max_uV >= 1800000))
772 vsel = 60;
773 else if ((min_uV > 1350000) && (max_uV >= 1500000))
774 vsel = 59;
775 else if ((min_uV > 1300000) && (max_uV >= 1350000))
776 vsel = 58;
777 else
778 return -EINVAL;
779 break;
780 case SMPS_EXTENDED_EN:
781 if (min_uV == 0)
782 vsel = 0;
783 else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
784 vsel = (min_uV - 1852000) / 386;
785 if (vsel % 100)
786 vsel += 100;
787 vsel /= 100;
788 vsel++;
789 }
790 break;
791 case SMPS_OFFSET_EN|SMPS_EXTENDED_EN:
792 if (min_uV == 0)
793 vsel = 0;
794 else if ((min_uV >= 2161000) && (max_uV <= 4321000)) {
795 vsel = (min_uV - 1852000) / 386;
796 if (vsel % 100)
797 vsel += 100;
798 vsel /= 100;
799 vsel++;
800 }
801 break;
802 }
803
804 *selector = vsel;
805
806 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS,
807 vsel);
808}
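The selector math in the flags == 0 branch above first truncates to 125 uV units and then rounds up to a whole 12.5 mV step before adding one for the "off" selector. A worked sketch (illustrative only; the offset and extended branches use the 700000, 1852000 and 2161000 uV bases instead):

static int smps_vsel_from_uV(int min_uV)
{
	int vsel = (min_uV - 600000) / 125;	/* 125 uV units, truncated */

	if (vsel % 100)				/* round up to a full 12.5 mV step */
		vsel += 100;
	vsel /= 100;
	return vsel + 1;			/* selector 0 is reserved for "off" */
}

/* Example: min_uV = 1005000 -> vsel = 34, and 600000 + 12500 * 33 = 1012500 uV,
 * the lowest table entry that is not below the requested minimum. */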
809
810static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
811{
812 struct twlreg_info *info = rdev_get_drvdata(rdev);
813
814 return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
815}
816
817static struct regulator_ops twlsmps_ops = {
818 .list_voltage = twl6030smps_list_voltage,
819
820 .set_voltage = twl6030smps_set_voltage,
821 .get_voltage_sel = twl6030smps_get_voltage_sel,
822
823 .enable = twl6030reg_enable,
824 .disable = twl6030reg_disable,
825 .is_enabled = twl6030reg_is_enabled,
826
827 .set_mode = twl6030reg_set_mode,
828
829 .get_status = twl6030reg_get_status,
 };
 
 /*----------------------------------------------------------------------*/
@@ -487,11 +834,10 @@ static struct regulator_ops twl6030_fixed_resource = {
 #define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
 			remap_conf) \
 		TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
-			remap_conf, TWL4030)
-#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
-			remap_conf) \
+			remap_conf, TWL4030, twl4030fixed_ops)
+#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \
 		TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
-			remap_conf, TWL6030)
+			0x0, TWL6030, twl6030fixed_ops)
 
 #define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
 	.base = offset, \
@@ -510,13 +856,11 @@ static struct regulator_ops twl6030_fixed_resource = {
 	}, \
 	}
 
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num, \
-		remap_conf) { \
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
 	.base = offset, \
 	.id = num, \
 	.min_mV = min_mVolts, \
 	.max_mV = max_mVolts, \
-	.remap = remap_conf, \
 	.desc = { \
 		.name = #label, \
 		.id = TWL6030_REG_##label, \
@@ -527,9 +871,23 @@ static struct regulator_ops twl6030_fixed_resource = {
 	}, \
 	}
 
+#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+	.base = offset, \
+	.id = num, \
+	.min_mV = min_mVolts, \
+	.max_mV = max_mVolts, \
+	.desc = { \
+		.name = #label, \
+		.id = TWL6025_REG_##label, \
+		.n_voltages = ((max_mVolts - min_mVolts)/100) + 1, \
+		.ops = &twl6030ldo_ops, \
+		.type = REGULATOR_VOLTAGE, \
+		.owner = THIS_MODULE, \
+		}, \
+	}
 
 #define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
-		family) { \
+		family, operations) { \
 	.base = offset, \
 	.id = num, \
 	.min_mV = mVolts, \
@@ -539,17 +897,16 @@ static struct regulator_ops twl6030_fixed_resource = {
 		.name = #label, \
 		.id = family##_REG_##label, \
 		.n_voltages = 1, \
-		.ops = &twlfixed_ops, \
+		.ops = &operations, \
 		.type = REGULATOR_VOLTAGE, \
 		.owner = THIS_MODULE, \
 		}, \
 	}
 
-#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay, remap_conf) { \
+#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \
 	.base = offset, \
 	.id = num, \
 	.delay = turnon_delay, \
-	.remap = remap_conf, \
 	.desc = { \
 		.name = #label, \
 		.id = TWL6030_REG_##label, \
@@ -559,6 +916,21 @@ static struct regulator_ops twl6030_fixed_resource = {
 	}, \
 	}
 
+#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \
+	.base = offset, \
+	.id = num, \
+	.min_mV = 600, \
+	.max_mV = 2100, \
+	.desc = { \
+		.name = #label, \
+		.id = TWL6025_REG_##label, \
+		.n_voltages = 63, \
+		.ops = &twlsmps_ops, \
+		.type = REGULATOR_VOLTAGE, \
+		.owner = THIS_MODULE, \
+		}, \
+	}
+
 /*
  * We list regulators here if systems need some level of
  * software control over them after boot.
@@ -589,19 +961,52 @@ static struct twlreg_info twl_regs[] = {
 	/* 6030 REG with base as PMC Slave Misc : 0x0030 */
 	/* Turnon-delay and remap configuration values for 6030 are not
 	   verified since the specification is not public */
-	TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1, 0x21),
-	TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2, 0x21),
-	TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3, 0x21),
-	TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4, 0x21),
-	TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5, 0x21),
-	TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7, 0x21),
-	TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
-	TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
-	TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
-	TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21),
-	TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0, 0x21),
+	TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1),
+	TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2),
+	TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3),
+	TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4),
+	TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5),
+	TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7),
+	TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0),
+	TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0),
+	TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0),
+	TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0),
+	TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0),
+
+	/* 6025 are renamed compared to 6030 versions */
+	TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1),
+	TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2),
+	TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3),
+	TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4),
+	TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5),
+	TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7),
+	TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16),
+	TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17),
+	TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18),
+
+	TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1),
+	TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2),
+	TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3),
 };
 
+static u8 twl_get_smps_offset(void)
+{
+	u8 value;
+
+	twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+			TWL6030_SMPS_OFFSET);
+	return value;
+}
+
+static u8 twl_get_smps_mult(void)
+{
+	u8 value;
+
+	twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+			TWL6030_SMPS_MULT);
+	return value;
+}
+
 static int __devinit twlreg_probe(struct platform_device *pdev)
 {
 	int i;
@@ -623,6 +1028,9 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
 	if (!initdata)
 		return -EINVAL;
 
+	/* copy the features into regulator data */
+	info->features = (unsigned long)initdata->driver_data;
+
 	/* Constrain board-specific capabilities according to what
 	 * this driver and the chip itself can actually do.
 	 */
@@ -645,6 +1053,27 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
 		break;
 	}
 
+	switch (pdev->id) {
+	case TWL6025_REG_SMPS3:
+		if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3)
+			info->flags |= SMPS_EXTENDED_EN;
+		if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3)
+			info->flags |= SMPS_OFFSET_EN;
+		break;
+	case TWL6025_REG_SMPS4:
+		if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4)
+			info->flags |= SMPS_EXTENDED_EN;
+		if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4)
+			info->flags |= SMPS_OFFSET_EN;
+		break;
+	case TWL6025_REG_VIO:
+		if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO)
+			info->flags |= SMPS_EXTENDED_EN;
+		if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO)
+			info->flags |= SMPS_OFFSET_EN;
+		break;
+	}
+
 	rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
 	if (IS_ERR(rdev)) {
 		dev_err(&pdev->dev, "can't register %s, %ld\n",
@@ -653,7 +1082,8 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
 	}
 	platform_set_drvdata(pdev, rdev);
 
-	twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
+	if (twl_class_is_4030())
+		twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
 						info->remap);
 
 	/* NOTE: many regulators support short-circuit IRQs (presentable
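How the features word used throughout the new 6025 paths gets there: the probe code above copies initdata->driver_data into info->features, and the SMPS offset/mult EPROM bits then pick the voltage table. A hypothetical board-code sketch, assuming TWL6025_SUBCLASS is exported by <linux/i2c/twl.h> as in the rest of the twl stack; the constraint values are placeholders only.

#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>

static struct regulator_init_data board_ldo2_initdata = {
	.constraints = {
		.min_uV		= 1000000,
		.max_uV		= 3300000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
	/* picked up in twlreg_probe() as info->features */
	.driver_data	= (void *)TWL6025_SUBCLASS,
};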
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index e93453b1b978..a0982e809851 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -600,7 +600,6 @@ err:
 static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
 {
 	struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
-	struct wm831x *wm831x = dcdc->wm831x;
 
 	platform_set_drvdata(pdev, NULL);
 
@@ -776,7 +775,6 @@ err:
 static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
 {
 	struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
-	struct wm831x *wm831x = dcdc->wm831x;
 
 	platform_set_drvdata(pdev, NULL);
 
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index b42d01cef35a..0f12c70bebc9 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -55,7 +55,7 @@ static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
 	return 1600000 + ((selector - 14) * 100000);
 }
 
-static int wm8400_ldo_get_voltage(struct regulator_dev *dev)
+static int wm8400_ldo_get_voltage_sel(struct regulator_dev *dev)
 {
 	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
 	u16 val;
@@ -63,7 +63,7 @@ static int wm8400_ldo_get_voltage(struct regulator_dev *dev)
 	val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev));
 	val &= WM8400_LDO1_VSEL_MASK;
 
-	return wm8400_ldo_list_voltage(dev, val);
+	return val;
 }
 
 static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
@@ -104,7 +104,7 @@ static struct regulator_ops wm8400_ldo_ops = {
 	.enable = wm8400_ldo_enable,
 	.disable = wm8400_ldo_disable,
 	.list_voltage = wm8400_ldo_list_voltage,
-	.get_voltage = wm8400_ldo_get_voltage,
+	.get_voltage_sel = wm8400_ldo_get_voltage_sel,
 	.set_voltage = wm8400_ldo_set_voltage,
 };
 
@@ -145,7 +145,7 @@ static int wm8400_dcdc_list_voltage(struct regulator_dev *dev,
 	return 850000 + (selector * 25000);
 }
 
-static int wm8400_dcdc_get_voltage(struct regulator_dev *dev)
+static int wm8400_dcdc_get_voltage_sel(struct regulator_dev *dev)
 {
 	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
 	u16 val;
@@ -154,7 +154,7 @@ static int wm8400_dcdc_get_voltage(struct regulator_dev *dev)
 	val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset);
 	val &= WM8400_DC1_VSEL_MASK;
 
-	return 850000 + (25000 * val);
+	return val;
 }
 
 static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
@@ -261,7 +261,7 @@ static struct regulator_ops wm8400_dcdc_ops = {
 	.enable = wm8400_dcdc_enable,
 	.disable = wm8400_dcdc_disable,
 	.list_voltage = wm8400_dcdc_list_voltage,
-	.get_voltage = wm8400_dcdc_get_voltage,
+	.get_voltage_sel = wm8400_dcdc_get_voltage_sel,
 	.set_voltage = wm8400_dcdc_set_voltage,
 	.get_mode = wm8400_dcdc_get_mode,
 	.set_mode = wm8400_dcdc_set_mode,
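With .get_voltage_sel the driver returns the raw selector and the regulator core is expected to map it through .list_voltage, so the reported voltage should be unchanged. For the DCDCs above that mapping is simply (illustrative restatement of wm8400_dcdc_list_voltage(), not new code in the patch):

static int wm8400_dcdc_uV_from_sel(unsigned selector)
{
	return 850000 + (selector * 25000);	/* same formula as list_voltage */
}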
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 8e437e2f6281..f822e13dc04b 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -361,12 +361,39 @@ config RTC_DRV_RX8025
361 This driver can also be built as a module. If so, the module 361 This driver can also be built as a module. If so, the module
362 will be called rtc-rx8025. 362 will be called rtc-rx8025.
363 363
364config RTC_DRV_EM3027
365 tristate "EM Microelectronic EM3027"
366 help
367 If you say yes here you get support for the EM
368 Microelectronic EM3027 RTC chips.
369
370 This driver can also be built as a module. If so, the module
371 will be called rtc-em3027.
372
373config RTC_DRV_RV3029C2
374 tristate "Micro Crystal RTC"
375 help
376 If you say yes here you get support for the Micro Crystal
377 RV3029-C2 RTC chips.
378
379 This driver can also be built as a module. If so, the module
380 will be called rtc-rv3029c2.
381
364endif # I2C 382endif # I2C
365 383
366comment "SPI RTC drivers" 384comment "SPI RTC drivers"
367 385
368if SPI_MASTER 386if SPI_MASTER
369 387
388config RTC_DRV_M41T93
389 tristate "ST M41T93"
390 help
391 If you say yes here you will get support for the
392 ST M41T93 SPI RTC chip.
393
394 This driver can also be built as a module. If so, the module
395 will be called rtc-m41t93.
396
370config RTC_DRV_M41T94 397config RTC_DRV_M41T94
371 tristate "ST M41T94" 398 tristate "ST M41T94"
372 help 399 help
@@ -655,6 +682,14 @@ config RTC_DRV_WM8350
655 This driver can also be built as a module. If so, the module 682 This driver can also be built as a module. If so, the module
656 will be called "rtc-wm8350". 683 will be called "rtc-wm8350".
657 684
685config RTC_DRV_SPEAR
686 tristate "SPEAR ST RTC"
687 depends on PLAT_SPEAR
688 default y
689 help
690 If you say Y here you will get support for the RTC found on
691 the ST SPEAr platform.
692
658config RTC_DRV_PCF50633 693config RTC_DRV_PCF50633
659 depends on MFD_PCF50633 694 depends on MFD_PCF50633
660 tristate "NXP PCF50633 RTC" 695 tristate "NXP PCF50633 RTC"
@@ -884,6 +919,13 @@ config RTC_DRV_PXA
884 This RTC driver uses PXA RTC registers available since pxa27x 919 This RTC driver uses PXA RTC registers available since pxa27x
885 series (RDxR, RYxR) instead of legacy RCNR, RTAR. 920 series (RDxR, RYxR) instead of legacy RCNR, RTAR.
886 921
922config RTC_DRV_VT8500
923 tristate "VIA/WonderMedia 85xx SoC RTC"
924 depends on ARCH_VT8500
925 help
926 If you say Y here you will get access to the real time clock
927 built into your VIA VT8500 SoC or its relatives.
928
887 929
888config RTC_DRV_SUN4V 930config RTC_DRV_SUN4V
889 bool "SUN4V Hypervisor RTC" 931 bool "SUN4V Hypervisor RTC"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 612f5a88a8ee..213d725f16d4 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
44obj-$(CONFIG_RTC_DRV_DS3232) += rtc-ds3232.o 44obj-$(CONFIG_RTC_DRV_DS3232) += rtc-ds3232.o
45obj-$(CONFIG_RTC_DRV_DS3234) += rtc-ds3234.o 45obj-$(CONFIG_RTC_DRV_DS3234) += rtc-ds3234.o
46obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o 46obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o
47obj-$(CONFIG_RTC_DRV_EM3027) += rtc-em3027.o
47obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o 48obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
48obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o 49obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
49obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o 50obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
@@ -53,6 +54,7 @@ obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
53obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o 54obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
54obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o 55obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o
55obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o 56obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
57obj-$(CONFIG_RTC_DRV_M41T93) += rtc-m41t93.o
56obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o 58obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
57obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o 59obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
58obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o 60obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
@@ -82,12 +84,14 @@ obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
82obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o 84obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
83obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o 85obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
84obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o 86obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
87obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o
85obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o 88obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
86obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o 89obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
87obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o 90obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
88obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o 91obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
89obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o 92obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
90obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o 93obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
94obj-$(CONFIG_RTC_DRV_SPEAR) += rtc-spear.o
91obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o 95obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
92obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o 96obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
93obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o 97obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
@@ -99,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
99obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o 103obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
100obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o 104obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
101obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o 105obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
106obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
102obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o 107obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
103obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o 108obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
104obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o 109obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
new file mode 100644
index 000000000000..d8e1c2578553
--- /dev/null
+++ b/drivers/rtc/rtc-em3027.c
@@ -0,0 +1,161 @@
1/*
2 * An rtc/i2c driver for the EM Microelectronic EM3027
3 * Copyright 2011 CompuLab, Ltd.
4 *
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on rtc-ds1672.c by Alessandro Zummo <a.zummo@towertech.it>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/i2c.h>
15#include <linux/rtc.h>
16#include <linux/bcd.h>
17
18/* Registers */
19#define EM3027_REG_ON_OFF_CTRL 0x00
20#define EM3027_REG_IRQ_CTRL 0x01
21#define EM3027_REG_IRQ_FLAGS 0x02
22#define EM3027_REG_STATUS 0x03
23#define EM3027_REG_RST_CTRL 0x04
24
25#define EM3027_REG_WATCH_SEC 0x08
26#define EM3027_REG_WATCH_MIN 0x09
27#define EM3027_REG_WATCH_HOUR 0x0a
28#define EM3027_REG_WATCH_DATE 0x0b
29#define EM3027_REG_WATCH_DAY 0x0c
30#define EM3027_REG_WATCH_MON 0x0d
31#define EM3027_REG_WATCH_YEAR 0x0e
32
33#define EM3027_REG_ALARM_SEC 0x10
34#define EM3027_REG_ALARM_MIN 0x11
35#define EM3027_REG_ALARM_HOUR 0x12
36#define EM3027_REG_ALARM_DATE 0x13
37#define EM3027_REG_ALARM_DAY 0x14
38#define EM3027_REG_ALARM_MON 0x15
39#define EM3027_REG_ALARM_YEAR 0x16
40
41static struct i2c_driver em3027_driver;
42
43static int em3027_get_time(struct device *dev, struct rtc_time *tm)
44{
45 struct i2c_client *client = to_i2c_client(dev);
46
47 unsigned char addr = EM3027_REG_WATCH_SEC;
48 unsigned char buf[7];
49
50 struct i2c_msg msgs[] = {
51 {client->addr, 0, 1, &addr}, /* setup read addr */
52 {client->addr, I2C_M_RD, 7, buf}, /* read time/date */
53 };
54
55 /* read time/date registers */
56 if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
57 dev_err(&client->dev, "%s: read error\n", __func__);
58 return -EIO;
59 }
60
61 tm->tm_sec = bcd2bin(buf[0]);
62 tm->tm_min = bcd2bin(buf[1]);
63 tm->tm_hour = bcd2bin(buf[2]);
64 tm->tm_mday = bcd2bin(buf[3]);
65 tm->tm_wday = bcd2bin(buf[4]);
66 tm->tm_mon = bcd2bin(buf[5]);
67 tm->tm_year = bcd2bin(buf[6]) + 100;
68
69 return 0;
70}
71
72static int em3027_set_time(struct device *dev, struct rtc_time *tm)
73{
74 struct i2c_client *client = to_i2c_client(dev);
75 unsigned char buf[8];
76
77 struct i2c_msg msg = {
78 client->addr, 0, 8, buf, /* write time/date */
79 };
80
81 buf[0] = EM3027_REG_WATCH_SEC;
82 buf[1] = bin2bcd(tm->tm_sec);
83 buf[2] = bin2bcd(tm->tm_min);
84 buf[3] = bin2bcd(tm->tm_hour);
85 buf[4] = bin2bcd(tm->tm_mday);
86 buf[5] = bin2bcd(tm->tm_wday);
87 buf[6] = bin2bcd(tm->tm_mon);
88 buf[7] = bin2bcd(tm->tm_year % 100);
89
90 /* write time/date registers */
91 if ((i2c_transfer(client->adapter, &msg, 1)) != 1) {
92 dev_err(&client->dev, "%s: write error\n", __func__);
93 return -EIO;
94 }
95
96 return 0;
97}
98
99static const struct rtc_class_ops em3027_rtc_ops = {
100 .read_time = em3027_get_time,
101 .set_time = em3027_set_time,
102};
103
104static int em3027_probe(struct i2c_client *client,
105 const struct i2c_device_id *id)
106{
107 struct rtc_device *rtc;
108
109 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
110 return -ENODEV;
111
112 rtc = rtc_device_register(em3027_driver.driver.name, &client->dev,
113 &em3027_rtc_ops, THIS_MODULE);
114 if (IS_ERR(rtc))
115 return PTR_ERR(rtc);
116
117 i2c_set_clientdata(client, rtc);
118
119 return 0;
120}
121
122static int em3027_remove(struct i2c_client *client)
123{
124 struct rtc_device *rtc = i2c_get_clientdata(client);
125
126 if (rtc)
127 rtc_device_unregister(rtc);
128
129 return 0;
130}
131
132static struct i2c_device_id em3027_id[] = {
133 { "em3027", 0 },
134 { }
135};
136
137static struct i2c_driver em3027_driver = {
138 .driver = {
139 .name = "rtc-em3027",
140 },
141 .probe = &em3027_probe,
142 .remove = &em3027_remove,
143 .id_table = em3027_id,
144};
145
146static int __init em3027_init(void)
147{
148 return i2c_add_driver(&em3027_driver);
149}
150
151static void __exit em3027_exit(void)
152{
153 i2c_del_driver(&em3027_driver);
154}
155
156MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
157MODULE_DESCRIPTION("EM Microelectronic EM3027 RTC driver");
158MODULE_LICENSE("GPL");
159
160module_init(em3027_init);
161module_exit(em3027_exit);
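A hypothetical board-file sketch for binding this driver; the bus number and the 0x56 slave address are placeholders, check the board schematic and the EM3027 datasheet for the real values.

#include <linux/i2c.h>

static struct i2c_board_info board_rtc_info __initdata = {
	I2C_BOARD_INFO("em3027", 0x56),		/* address is an example only */
};

/* from the board init code, e.g.:
 *	i2c_register_board_info(0, &board_rtc_info, 1);
 */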
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
new file mode 100644
index 000000000000..1a84b3e227d1
--- /dev/null
+++ b/drivers/rtc/rtc-m41t93.c
@@ -0,0 +1,225 @@
1/*
2 *
3 * Driver for ST M41T93 SPI RTC
4 *
5 * (c) 2010 Nikolaus Voss, Weinmann Medical GmbH
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/bcd.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/rtc.h>
17#include <linux/spi/spi.h>
18
19#define M41T93_REG_SSEC 0
20#define M41T93_REG_ST_SEC 1
21#define M41T93_REG_MIN 2
22#define M41T93_REG_CENT_HOUR 3
23#define M41T93_REG_WDAY 4
24#define M41T93_REG_DAY 5
25#define M41T93_REG_MON 6
26#define M41T93_REG_YEAR 7
27
28
29#define M41T93_REG_ALM_HOUR_HT 0xc
30#define M41T93_REG_FLAGS 0xf
31
32#define M41T93_FLAG_ST (1 << 7)
33#define M41T93_FLAG_OF (1 << 2)
34#define M41T93_FLAG_BL (1 << 4)
35#define M41T93_FLAG_HT (1 << 6)
36
37static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data)
38{
39 u8 buf[2];
40
41 /* MSB must be '1' to write */
42 buf[0] = addr | 0x80;
43 buf[1] = data;
44
45 return spi_write(spi, buf, sizeof(buf));
46}
47
48static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
49{
50 struct spi_device *spi = to_spi_device(dev);
51 u8 buf[9] = {0x80}; /* write cmd + 8 data bytes */
52 u8 * const data = &buf[1]; /* ptr to first data byte */
53
54 dev_dbg(dev, "%s secs=%d, mins=%d, "
55 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
56 "write", tm->tm_sec, tm->tm_min,
57 tm->tm_hour, tm->tm_mday,
58 tm->tm_mon, tm->tm_year, tm->tm_wday);
59
60 if (tm->tm_year < 100) {
61 dev_warn(&spi->dev, "unsupported date (before 2000-01-01).\n");
62 return -EINVAL;
63 }
64
65 data[M41T93_REG_SSEC] = 0;
66 data[M41T93_REG_ST_SEC] = bin2bcd(tm->tm_sec);
67 data[M41T93_REG_MIN] = bin2bcd(tm->tm_min);
68 data[M41T93_REG_CENT_HOUR] = bin2bcd(tm->tm_hour) |
69 ((tm->tm_year/100-1) << 6);
70 data[M41T93_REG_DAY] = bin2bcd(tm->tm_mday);
71 data[M41T93_REG_WDAY] = bin2bcd(tm->tm_wday + 1);
72 data[M41T93_REG_MON] = bin2bcd(tm->tm_mon + 1);
73 data[M41T93_REG_YEAR] = bin2bcd(tm->tm_year % 100);
74
75 return spi_write(spi, buf, sizeof(buf));
76}
77
78
79static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
80{
81 struct spi_device *spi = to_spi_device(dev);
82 const u8 start_addr = 0;
83 u8 buf[8];
84 int century_after_1900;
85 int tmp;
86 int ret = 0;
87
88 /* Check the status of the clock. Two states must be considered:
89 1. halt bit (HT) is set: the clock is running, but updating of the readout
90 registers has been disabled due to a power failure. This is the normal
91 case after power-on; the time is valid after resetting the HT bit.
92 2. oscillator fail bit (OF) is set: the oscillator has been stopped and
93 the time is invalid:
94 a) OF can be immediately reset.
95 b) OF cannot be immediately reset: the oscillator has to be restarted.
96 */
97 tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
98 if (tmp < 0)
99 return tmp;
100
101 if (tmp & M41T93_FLAG_HT) {
102 dev_dbg(&spi->dev, "HT bit is set, reenable clock update.\n");
103 m41t93_set_reg(spi, M41T93_REG_ALM_HOUR_HT,
104 tmp & ~M41T93_FLAG_HT);
105 }
106
107 tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
108 if (tmp < 0)
109 return tmp;
110
111 if (tmp & M41T93_FLAG_OF) {
112 ret = -EINVAL;
113 dev_warn(&spi->dev, "OF bit is set, resetting.\n");
114 m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
115
116 tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
117 if (tmp < 0)
118 return tmp;
119 else if (tmp & M41T93_FLAG_OF) {
120 u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
121
122 dev_warn(&spi->dev,
123 "OF bit is still set, kickstarting clock.\n");
124 m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
125 reset_osc &= ~M41T93_FLAG_ST;
126 m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
127 }
128 }
129
130 if (tmp & M41T93_FLAG_BL)
131 dev_warn(&spi->dev, "BL bit is set, replace battery.\n");
132
133 /* read actual time/date */
134 tmp = spi_write_then_read(spi, &start_addr, 1, buf, sizeof(buf));
135 if (tmp < 0)
136 return tmp;
137
138 tm->tm_sec = bcd2bin(buf[M41T93_REG_ST_SEC]);
139 tm->tm_min = bcd2bin(buf[M41T93_REG_MIN]);
140 tm->tm_hour = bcd2bin(buf[M41T93_REG_CENT_HOUR] & 0x3f);
141 tm->tm_mday = bcd2bin(buf[M41T93_REG_DAY]);
142 tm->tm_mon = bcd2bin(buf[M41T93_REG_MON]) - 1;
143 tm->tm_wday = bcd2bin(buf[M41T93_REG_WDAY] & 0x0f) - 1;
144
145 century_after_1900 = (buf[M41T93_REG_CENT_HOUR] >> 6) + 1;
146 tm->tm_year = bcd2bin(buf[M41T93_REG_YEAR]) + century_after_1900 * 100;
147
148 dev_dbg(dev, "%s secs=%d, mins=%d, "
149 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
150 "read", tm->tm_sec, tm->tm_min,
151 tm->tm_hour, tm->tm_mday,
152 tm->tm_mon, tm->tm_year, tm->tm_wday);
153
154 return ret < 0 ? ret : rtc_valid_tm(tm);
155}
156
157
158static const struct rtc_class_ops m41t93_rtc_ops = {
159 .read_time = m41t93_get_time,
160 .set_time = m41t93_set_time,
161};
162
163static struct spi_driver m41t93_driver;
164
165static int __devinit m41t93_probe(struct spi_device *spi)
166{
167 struct rtc_device *rtc;
168 int res;
169
170 spi->bits_per_word = 8;
171 spi_setup(spi);
172
173 res = spi_w8r8(spi, M41T93_REG_WDAY);
174 if (res < 0 || (res & 0xf8) != 0) {
175 dev_err(&spi->dev, "not found 0x%x.\n", res);
176 return -ENODEV;
177 }
178
179 rtc = rtc_device_register(m41t93_driver.driver.name,
180 &spi->dev, &m41t93_rtc_ops, THIS_MODULE);
181 if (IS_ERR(rtc))
182 return PTR_ERR(rtc);
183
184 dev_set_drvdata(&spi->dev, rtc);
185
186 return 0;
187}
188
189
190static int __devexit m41t93_remove(struct spi_device *spi)
191{
192 struct rtc_device *rtc = platform_get_drvdata(spi);
193
194 if (rtc)
195 rtc_device_unregister(rtc);
196
197 return 0;
198}
199
200static struct spi_driver m41t93_driver = {
201 .driver = {
202 .name = "rtc-m41t93",
203 .bus = &spi_bus_type,
204 .owner = THIS_MODULE,
205 },
206 .probe = m41t93_probe,
207 .remove = __devexit_p(m41t93_remove),
208};
209
210static __init int m41t93_init(void)
211{
212 return spi_register_driver(&m41t93_driver);
213}
214module_init(m41t93_init);
215
216static __exit void m41t93_exit(void)
217{
218 spi_unregister_driver(&m41t93_driver);
219}
220module_exit(m41t93_exit);
221
222MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
223MODULE_DESCRIPTION("Driver for ST M41T93 SPI RTC");
224MODULE_LICENSE("GPL");
225MODULE_ALIAS("spi:rtc-m41t93");
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index b2f096871a97..0cec5650d56a 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -380,7 +380,7 @@ cleanup1:
 cleanup0:
 	dev_set_drvdata(dev, NULL);
 	mrst_rtc.dev = NULL;
-	release_region(iomem->start, iomem->end + 1 - iomem->start);
+	release_mem_region(iomem->start, resource_size(iomem));
 	dev_err(dev, "rtc-mrst: unable to initialise\n");
 	return retval;
 }
@@ -406,7 +406,7 @@ static void __devexit rtc_mrst_do_remove(struct device *dev)
 	mrst->rtc = NULL;
 
 	iomem = mrst->iomem;
-	release_region(iomem->start, iomem->end + 1 - iomem->start);
+	release_mem_region(iomem->start, resource_size(iomem));
 	mrst->iomem = NULL;
 
 	mrst->dev = NULL;
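The fix above pairs the release call with the way the region was requested and lets resource_size() handle the end - start + 1 arithmetic. A minimal sketch of the intended pairing, assuming the probe path requested I/O memory (the helper names here are illustrative, not from the driver):

#include <linux/ioport.h>

static int example_claim(struct resource *iomem)
{
	if (!request_mem_region(iomem->start, resource_size(iomem), "rtc-mrst"))
		return -EBUSY;
	return 0;
}

static void example_release(struct resource *iomem)
{
	release_mem_region(iomem->start, resource_size(iomem));
}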
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index d814417bee8c..39e41fbdf08b 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -55,12 +55,6 @@ static const u32 PIE_BIT_DEF[MAX_PIE_NUM][2] = {
 	{ MAX_PIE_FREQ, RTC_SAM7_BIT },
 };
 
-/* Those are the bits from a classic RTC we want to mimic */
-#define RTC_IRQF 0x80		/* any of the following 3 is active */
-#define RTC_PF 0x40		/* Periodic interrupt */
-#define RTC_AF 0x20		/* Alarm interrupt */
-#define RTC_UF 0x10		/* Update interrupt for 1Hz RTC */
-
 #define MXC_RTC_TIME	0
 #define MXC_RTC_ALARM	1
 
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index f90c574f9d05..0c423892923c 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -58,7 +58,6 @@ struct pcf50633_time {
58 58
59struct pcf50633_rtc { 59struct pcf50633_rtc {
60 int alarm_enabled; 60 int alarm_enabled;
61 int second_enabled;
62 int alarm_pending; 61 int alarm_pending;
63 62
64 struct pcf50633 *pcf; 63 struct pcf50633 *pcf;
@@ -143,7 +142,7 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
143{ 142{
144 struct pcf50633_rtc *rtc; 143 struct pcf50633_rtc *rtc;
145 struct pcf50633_time pcf_tm; 144 struct pcf50633_time pcf_tm;
146 int second_masked, alarm_masked, ret = 0; 145 int alarm_masked, ret = 0;
147 146
148 rtc = dev_get_drvdata(dev); 147 rtc = dev_get_drvdata(dev);
149 148
@@ -162,11 +161,8 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
162 pcf_tm.time[PCF50633_TI_SEC]); 161 pcf_tm.time[PCF50633_TI_SEC]);
163 162
164 163
165 second_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_SECOND);
166 alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM); 164 alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);
167 165
168 if (!second_masked)
169 pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_SECOND);
170 if (!alarm_masked) 166 if (!alarm_masked)
171 pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM); 167 pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
172 168
@@ -175,8 +171,6 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
175 PCF50633_TI_EXTENT, 171 PCF50633_TI_EXTENT,
176 &pcf_tm.time[0]); 172 &pcf_tm.time[0]);
177 173
178 if (!second_masked)
179 pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_SECOND);
180 if (!alarm_masked) 174 if (!alarm_masked)
181 pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM); 175 pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
182 176
@@ -250,15 +244,8 @@ static void pcf50633_rtc_irq(int irq, void *data)
250{ 244{
251 struct pcf50633_rtc *rtc = data; 245 struct pcf50633_rtc *rtc = data;
252 246
253 switch (irq) { 247 rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
254 case PCF50633_IRQ_ALARM: 248 rtc->alarm_pending = 1;
255 rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
256 rtc->alarm_pending = 1;
257 break;
258 case PCF50633_IRQ_SECOND:
259 rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
260 break;
261 }
262} 249}
263 250
264static int __devinit pcf50633_rtc_probe(struct platform_device *pdev) 251static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
@@ -282,9 +269,6 @@ static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
282 269
283 pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM, 270 pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM,
284 pcf50633_rtc_irq, rtc); 271 pcf50633_rtc_irq, rtc);
285 pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_SECOND,
286 pcf50633_rtc_irq, rtc);
287
288 return 0; 272 return 0;
289} 273}
290 274
@@ -295,7 +279,6 @@ static int __devexit pcf50633_rtc_remove(struct platform_device *pdev)
295 rtc = platform_get_drvdata(pdev); 279 rtc = platform_get_drvdata(pdev);
296 280
297 pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM); 281 pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM);
298 pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_SECOND);
299 282
300 rtc_device_unregister(rtc->rtc_dev); 283 rtc_device_unregister(rtc->rtc_dev);
301 kfree(rtc); 284 kfree(rtc);
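With the SECOND interrupt support removed, the handler above only ever reports an alarm event. rtc_update_irq() takes an event count plus a mask of the RTC_* flags from <linux/rtc.h> (the same flags whose duplicate definitions were dropped from rtc-mxc.c above); an illustrative call:

#include <linux/rtc.h>

static void example_report_alarm(struct rtc_device *rtc_dev)
{
	rtc_update_irq(rtc_dev, 1, RTC_AF | RTC_IRQF);	/* one alarm event */
}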
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
new file mode 100644
index 000000000000..ea09ff211dc6
--- /dev/null
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -0,0 +1,454 @@
1/*
2 * Micro Crystal RV-3029C2 rtc class driver
3 *
4 * Author: Gregory Hermant <gregory.hermant@calao-systems.com>
5 *
6 * based on previously existing rtc class drivers
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * NOTE: Currently this driver only supports the bare minimum for reading
13 * and writing the RTC and alarms. The extra features provided by this chip
14 * (trickle charger, eeprom, T° compensation) are unavailable.
15 */
16
17#include <linux/module.h>
18#include <linux/i2c.h>
19#include <linux/bcd.h>
20#include <linux/rtc.h>
21
22/* Register map */
23/* control section */
24#define RV3029C2_ONOFF_CTRL 0x00
25#define RV3029C2_IRQ_CTRL 0x01
26#define RV3029C2_IRQ_CTRL_AIE (1 << 0)
27#define RV3029C2_IRQ_FLAGS 0x02
28#define RV3029C2_IRQ_FLAGS_AF (1 << 0)
29#define RV3029C2_STATUS 0x03
30#define RV3029C2_STATUS_VLOW1 (1 << 2)
31#define RV3029C2_STATUS_VLOW2 (1 << 3)
32#define RV3029C2_STATUS_SR (1 << 4)
33#define RV3029C2_STATUS_PON (1 << 5)
34#define RV3029C2_STATUS_EEBUSY (1 << 7)
35#define RV3029C2_RST_CTRL 0x04
36#define RV3029C2_CONTROL_SECTION_LEN 0x05
37
38/* watch section */
39#define RV3029C2_W_SEC 0x08
40#define RV3029C2_W_MINUTES 0x09
41#define RV3029C2_W_HOURS 0x0A
42#define RV3029C2_REG_HR_12_24 (1<<6) /* 24h/12h mode */
43#define RV3029C2_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */
44#define RV3029C2_W_DATE 0x0B
45#define RV3029C2_W_DAYS 0x0C
46#define RV3029C2_W_MONTHS 0x0D
47#define RV3029C2_W_YEARS 0x0E
48#define RV3029C2_WATCH_SECTION_LEN 0x07
49
50/* alarm section */
51#define RV3029C2_A_SC 0x10
52#define RV3029C2_A_MN 0x11
53#define RV3029C2_A_HR 0x12
54#define RV3029C2_A_DT 0x13
55#define RV3029C2_A_DW 0x14
56#define RV3029C2_A_MO 0x15
57#define RV3029C2_A_YR 0x16
58#define RV3029C2_ALARM_SECTION_LEN 0x07
59
60/* timer section */
61#define RV3029C2_TIMER_LOW 0x18
62#define RV3029C2_TIMER_HIGH 0x19
63
64/* temperature section */
65#define RV3029C2_TEMP_PAGE 0x20
66
67/* eeprom data section */
68#define RV3029C2_E2P_EEDATA1 0x28
69#define RV3029C2_E2P_EEDATA2 0x29
70
71/* eeprom control section */
72#define RV3029C2_CONTROL_E2P_EECTRL 0x30
73#define RV3029C2_TRICKLE_1K (1<<0) /* 1K resistance */
74#define RV3029C2_TRICKLE_5K (1<<1) /* 5K resistance */
75#define RV3029C2_TRICKLE_20K (1<<2) /* 20K resistance */
76#define RV3029C2_TRICKLE_80K (1<<3) /* 80K resistance */
77#define RV3029C2_CONTROL_E2P_XTALOFFSET 0x31
78#define RV3029C2_CONTROL_E2P_QCOEF 0x32
79#define RV3029C2_CONTROL_E2P_TURNOVER 0x33
80
81/* user ram section */
82#define RV3029C2_USR1_RAM_PAGE 0x38
83#define RV3029C2_USR1_SECTION_LEN 0x04
84#define RV3029C2_USR2_RAM_PAGE 0x3C
85#define RV3029C2_USR2_SECTION_LEN 0x04
86
87static int
88rv3029c2_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf,
89 unsigned len)
90{
91 int ret;
92
93 if ((reg > RV3029C2_USR1_RAM_PAGE + 7) ||
94 (reg + len > RV3029C2_USR1_RAM_PAGE + 8))
95 return -EINVAL;
96
97 ret = i2c_smbus_read_i2c_block_data(client, reg, len, buf);
98 if (ret < 0)
99 return ret;
100 if (ret < len)
101 return -EIO;
102 return 0;
103}
104
105static int
106rv3029c2_i2c_write_regs(struct i2c_client *client, u8 reg, u8 const buf[],
107 unsigned len)
108{
109 if ((reg > RV3029C2_USR1_RAM_PAGE + 7) ||
110 (reg + len > RV3029C2_USR1_RAM_PAGE + 8))
111 return -EINVAL;
112
113 return i2c_smbus_write_i2c_block_data(client, reg, len, buf);
114}
115
116static int
117rv3029c2_i2c_get_sr(struct i2c_client *client, u8 *buf)
118{
119 int ret = rv3029c2_i2c_read_regs(client, RV3029C2_STATUS, buf, 1);
120
121 if (ret < 0)
122 return -EIO;
123 dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
124 return 0;
125}
126
127static int
128rv3029c2_i2c_set_sr(struct i2c_client *client, u8 val)
129{
130 u8 buf[1];
131 int sr;
132
133 buf[0] = val;
134 sr = rv3029c2_i2c_write_regs(client, RV3029C2_STATUS, buf, 1);
135 dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
136 if (sr < 0)
137 return -EIO;
138 return 0;
139}
140
141static int
142rv3029c2_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
143{
144 u8 buf[1];
145 int ret;
146 u8 regs[RV3029C2_WATCH_SECTION_LEN] = { 0, };
147
148 ret = rv3029c2_i2c_get_sr(client, buf);
149 if (ret < 0) {
150 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
151 return -EIO;
152 }
153
154 ret = rv3029c2_i2c_read_regs(client, RV3029C2_W_SEC , regs,
155 RV3029C2_WATCH_SECTION_LEN);
156 if (ret < 0) {
157 dev_err(&client->dev, "%s: reading RTC section failed\n",
158 __func__);
159 return ret;
160 }
161
162 tm->tm_sec = bcd2bin(regs[RV3029C2_W_SEC-RV3029C2_W_SEC]);
163 tm->tm_min = bcd2bin(regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC]);
164
165 /* HR field has a more complex interpretation */
166 {
167 const u8 _hr = regs[RV3029C2_W_HOURS-RV3029C2_W_SEC];
168 if (_hr & RV3029C2_REG_HR_12_24) {
169 /* 12h format */
170 tm->tm_hour = bcd2bin(_hr & 0x1f);
171 if (_hr & RV3029C2_REG_HR_PM) /* PM flag set */
172 tm->tm_hour += 12;
173 } else /* 24h format */
174 tm->tm_hour = bcd2bin(_hr & 0x3f);
175 }
176
177 tm->tm_mday = bcd2bin(regs[RV3029C2_W_DATE-RV3029C2_W_SEC]);
178 tm->tm_mon = bcd2bin(regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC]) - 1;
179 tm->tm_year = bcd2bin(regs[RV3029C2_W_YEARS-RV3029C2_W_SEC]) + 100;
180 tm->tm_wday = bcd2bin(regs[RV3029C2_W_DAYS-RV3029C2_W_SEC]) - 1;
181
182 return 0;
183}
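The hour decoding above handles both clock modes of the RV-3029-C2: in 12h mode (bit 6 set) the PM flag is bit 5 and the BCD hour sits in bits 0..4, otherwise the BCD hour uses bits 0..5. An illustrative helper that mirrors that logic using the register-bit macros defined earlier in this file (e.g. 0x69 decodes as 9 PM, i.e. hour 21):

#include <linux/types.h>
#include <linux/bcd.h>

static int rv3029c2_decode_hour(u8 hr)
{
	if (hr & RV3029C2_REG_HR_12_24) {
		int hour = bcd2bin(hr & 0x1f);	/* 12h mode: hour in bits 0..4 */

		if (hr & RV3029C2_REG_HR_PM)
			hour += 12;		/* PM flag in bit 5 */
		return hour;
	}
	return bcd2bin(hr & 0x3f);		/* 24h mode: hour in bits 0..5 */
}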
184
185static int rv3029c2_rtc_read_time(struct device *dev, struct rtc_time *tm)
186{
187 return rv3029c2_i2c_read_time(to_i2c_client(dev), tm);
188}
189
190static int
191rv3029c2_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
192{
193 struct rtc_time *const tm = &alarm->time;
194 int ret;
195 u8 regs[8];
196
197 ret = rv3029c2_i2c_get_sr(client, regs);
198 if (ret < 0) {
199 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
200 return -EIO;
201 }
202
203 ret = rv3029c2_i2c_read_regs(client, RV3029C2_A_SC, regs,
204 RV3029C2_ALARM_SECTION_LEN);
205
206 if (ret < 0) {
207 dev_err(&client->dev, "%s: reading alarm section failed\n",
208 __func__);
209 return ret;
210 }
211
212 tm->tm_sec = bcd2bin(regs[RV3029C2_A_SC-RV3029C2_A_SC] & 0x7f);
213 tm->tm_min = bcd2bin(regs[RV3029C2_A_MN-RV3029C2_A_SC] & 0x7f);
214 tm->tm_hour = bcd2bin(regs[RV3029C2_A_HR-RV3029C2_A_SC] & 0x3f);
215 tm->tm_mday = bcd2bin(regs[RV3029C2_A_DT-RV3029C2_A_SC] & 0x3f);
216 tm->tm_mon = bcd2bin(regs[RV3029C2_A_MO-RV3029C2_A_SC] & 0x1f) - 1;
217 tm->tm_year = bcd2bin(regs[RV3029C2_A_YR-RV3029C2_A_SC] & 0x7f) + 100;
218 tm->tm_wday = bcd2bin(regs[RV3029C2_A_DW-RV3029C2_A_SC] & 0x07) - 1;
219
220 return 0;
221}
222
223static int
224rv3029c2_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
225{
226 return rv3029c2_i2c_read_alarm(to_i2c_client(dev), alarm);
227}
228
229static int rv3029c2_rtc_i2c_alarm_set_irq(struct i2c_client *client,
230 int enable)
231{
232 int ret;
233 u8 buf[1];
234
235 /* enable AIE irq */
236 ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_CTRL, buf, 1);
237 if (ret < 0) {
238 dev_err(&client->dev, "can't read INT reg\n");
239 return ret;
240 }
241 if (enable)
242 buf[0] |= RV3029C2_IRQ_CTRL_AIE;
243 else
244 buf[0] &= ~RV3029C2_IRQ_CTRL_AIE;
245
246 ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_CTRL, buf, 1);
247 if (ret < 0) {
248 dev_err(&client->dev, "can't set INT reg\n");
249 return ret;
250 }
251
252 return 0;
253}
254
255static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
256 struct rtc_wkalrm *alarm)
257{
258 struct rtc_time *const tm = &alarm->time;
259 int ret;
260 u8 regs[8];
261
262 /*
263 * The clock has an 8 bit wide bcd-coded register (they never learn)
264 * for the year. tm_year is an offset from 1900 and we are interested
265 * in the 2000-2099 range, so any value less than 100 is invalid.
266 */
267 if (tm->tm_year < 100)
268 return -EINVAL;
269
270 ret = rv3029c2_i2c_get_sr(client, regs);
271 if (ret < 0) {
272 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
273 return -EIO;
274 }
275 regs[RV3029C2_A_SC-RV3029C2_A_SC] = bin2bcd(tm->tm_sec & 0x7f);
276 regs[RV3029C2_A_MN-RV3029C2_A_SC] = bin2bcd(tm->tm_min & 0x7f);
277 regs[RV3029C2_A_HR-RV3029C2_A_SC] = bin2bcd(tm->tm_hour & 0x3f);
278 regs[RV3029C2_A_DT-RV3029C2_A_SC] = bin2bcd(tm->tm_mday & 0x3f);
279 regs[RV3029C2_A_MO-RV3029C2_A_SC] = bin2bcd((tm->tm_mon & 0x1f) - 1);
280 regs[RV3029C2_A_DW-RV3029C2_A_SC] = bin2bcd((tm->tm_wday & 7) - 1);
281 regs[RV3029C2_A_YR-RV3029C2_A_SC] = bin2bcd((tm->tm_year & 0x7f) - 100);
282
283 ret = rv3029c2_i2c_write_regs(client, RV3029C2_A_SC, regs,
284 RV3029C2_ALARM_SECTION_LEN);
285 if (ret < 0)
286 return ret;
287
288 if (alarm->enabled) {
289 u8 buf[1];
290
291 /* clear AF flag */
292 ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_FLAGS,
293 buf, 1);
294 if (ret < 0) {
295 dev_err(&client->dev, "can't read alarm flag\n");
296 return ret;
297 }
298 buf[0] &= ~RV3029C2_IRQ_FLAGS_AF;
299 ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_FLAGS,
300 buf, 1);
301 if (ret < 0) {
302 dev_err(&client->dev, "can't set alarm flag\n");
303 return ret;
304 }
305 /* enable AIE irq */
306 ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
307 if (ret)
308 return ret;
309
310 dev_dbg(&client->dev, "alarm IRQ armed\n");
311 } else {
312 /* disable AIE irq */
313	ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
314 if (ret)
315 return ret;
316
317 dev_dbg(&client->dev, "alarm IRQ disabled\n");
318 }
319
320 return 0;
321}
322
323static int rv3029c2_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
324{
325 return rv3029c2_rtc_i2c_set_alarm(to_i2c_client(dev), alarm);
326}
327
328static int
329rv3029c2_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
330{
331 u8 regs[8];
332 int ret;
333
334 /*
335 * The clock has an 8 bit wide bcd-coded register (they never learn)
336 * for the year. tm_year is an offset from 1900 and we are interested
337 * in the 2000-2099 range, so any value less than 100 is invalid.
338 */
339 if (tm->tm_year < 100)
340 return -EINVAL;
341
342 regs[RV3029C2_W_SEC-RV3029C2_W_SEC] = bin2bcd(tm->tm_sec);
343 regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC] = bin2bcd(tm->tm_min);
344 regs[RV3029C2_W_HOURS-RV3029C2_W_SEC] = bin2bcd(tm->tm_hour);
345 regs[RV3029C2_W_DATE-RV3029C2_W_SEC] = bin2bcd(tm->tm_mday);
346 regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC] = bin2bcd(tm->tm_mon+1);
347 regs[RV3029C2_W_DAYS-RV3029C2_W_SEC] = bin2bcd((tm->tm_wday & 7)+1);
348 regs[RV3029C2_W_YEARS-RV3029C2_W_SEC] = bin2bcd(tm->tm_year - 100);
349
350 ret = rv3029c2_i2c_write_regs(client, RV3029C2_W_SEC, regs,
351 RV3029C2_WATCH_SECTION_LEN);
352 if (ret < 0)
353 return ret;
354
355 ret = rv3029c2_i2c_get_sr(client, regs);
356 if (ret < 0) {
357 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
358 return ret;
359 }
360 /* clear PON bit */
361 ret = rv3029c2_i2c_set_sr(client, (regs[0] & ~RV3029C2_STATUS_PON));
362 if (ret < 0) {
363	dev_err(&client->dev, "%s: writing SR failed\n", __func__);
364 return ret;
365 }
366
367 return 0;
368}
369
370static int rv3029c2_rtc_set_time(struct device *dev, struct rtc_time *tm)
371{
372 return rv3029c2_i2c_set_time(to_i2c_client(dev), tm);
373}
374
375static const struct rtc_class_ops rv3029c2_rtc_ops = {
376 .read_time = rv3029c2_rtc_read_time,
377 .set_time = rv3029c2_rtc_set_time,
378 .read_alarm = rv3029c2_rtc_read_alarm,
379 .set_alarm = rv3029c2_rtc_set_alarm,
380};
381
382static struct i2c_device_id rv3029c2_id[] = {
383 { "rv3029c2", 0 },
384 { }
385};
386MODULE_DEVICE_TABLE(i2c, rv3029c2_id);
387
388static int __devinit
389rv3029c2_probe(struct i2c_client *client, const struct i2c_device_id *id)
390{
391 struct rtc_device *rtc;
392 int rc = 0;
393 u8 buf[1];
394
395 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL))
396 return -ENODEV;
397
398 rtc = rtc_device_register(client->name,
399 &client->dev, &rv3029c2_rtc_ops,
400 THIS_MODULE);
401
402 if (IS_ERR(rtc))
403 return PTR_ERR(rtc);
404
405 i2c_set_clientdata(client, rtc);
406
407 rc = rv3029c2_i2c_get_sr(client, buf);
408 if (rc < 0) {
409 dev_err(&client->dev, "reading status failed\n");
410 goto exit_unregister;
411 }
412
413 return 0;
414
415exit_unregister:
416 rtc_device_unregister(rtc);
417
418 return rc;
419}
420
421static int __devexit rv3029c2_remove(struct i2c_client *client)
422{
423 struct rtc_device *rtc = i2c_get_clientdata(client);
424
425 rtc_device_unregister(rtc);
426
427 return 0;
428}
429
430static struct i2c_driver rv3029c2_driver = {
431 .driver = {
432 .name = "rtc-rv3029c2",
433 },
434 .probe = rv3029c2_probe,
435 .remove = __devexit_p(rv3029c2_remove),
436 .id_table = rv3029c2_id,
437};
438
439static int __init rv3029c2_init(void)
440{
441 return i2c_add_driver(&rv3029c2_driver);
442}
443
444static void __exit rv3029c2_exit(void)
445{
446 i2c_del_driver(&rv3029c2_driver);
447}
448
449module_init(rv3029c2_init);
450module_exit(rv3029c2_exit);
451
452MODULE_AUTHOR("Gregory Hermant <gregory.hermant@calao-systems.com>");
453MODULE_DESCRIPTION("Micro Crystal RV3029C2 RTC driver");
454MODULE_LICENSE("GPL");
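
The alarm plumbing above (read_alarm/set_alarm plus the AIE enable helper) is only reachable through the RTC class character device. As a rough illustration, not part of this patch, a userspace sketch along the following lines could exercise those paths; the /dev/rtc0 node name and the one-minute offset are assumptions made for the sketch.

/*
 * Minimal userspace sketch (illustration only). The ioctls end up in the
 * driver's ->read_time and ->set_alarm callbacks via the RTC core.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	struct rtc_wkalrm alarm;
	int fd = open("/dev/rtc0", O_RDONLY);	/* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {	/* -> rv3029c2_rtc_read_time() */
		perror("RTC_RD_TIME");
		close(fd);
		return 1;
	}
	printf("rtc: %04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);

	/* naively arm an alarm one minute ahead (ignoring hour rollover) */
	memset(&alarm, 0, sizeof(alarm));
	alarm.time = tm;
	alarm.time.tm_min = (tm.tm_min + 1) % 60;
	alarm.enabled = 1;
	if (ioctl(fd, RTC_WKALM_SET, &alarm) < 0)	/* -> rv3029c2_rtc_set_alarm() */
		perror("RTC_WKALM_SET");

	close(fd);
	return 0;
}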
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
new file mode 100644
index 000000000000..893bac2bb21b
--- /dev/null
+++ b/drivers/rtc/rtc-spear.c
@@ -0,0 +1,534 @@
1/*
2 * drivers/rtc/rtc-spear.c
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Rajeev Kumar <rajeev-dlh.kumar@st.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/bcd.h>
13#include <linux/clk.h>
14#include <linux/delay.h>
15#include <linux/init.h>
16#include <linux/io.h>
17#include <linux/irq.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/rtc.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23
24/* RTC registers */
25#define TIME_REG 0x00
26#define DATE_REG 0x04
27#define ALARM_TIME_REG 0x08
28#define ALARM_DATE_REG 0x0C
29#define CTRL_REG 0x10
30#define STATUS_REG 0x14
31
32/* TIME_REG & ALARM_TIME_REG */
33#define SECONDS_UNITS (0xf<<0) /* seconds units position */
34#define SECONDS_TENS (0x7<<4) /* seconds tens position */
35#define MINUTES_UNITS (0xf<<8) /* minutes units position */
36#define MINUTES_TENS (0x7<<12) /* minutes tens position */
37#define HOURS_UNITS (0xf<<16) /* hours units position */
38#define HOURS_TENS (0x3<<20) /* hours tens position */
39
40/* DATE_REG & ALARM_DATE_REG */
41#define DAYS_UNITS (0xf<<0) /* days units position */
42#define DAYS_TENS (0x3<<4) /* days tens position */
43#define MONTHS_UNITS (0xf<<8) /* months units position */
44#define MONTHS_TENS (0x1<<12) /* months tens position */
45#define YEARS_UNITS (0xf<<16) /* years units position */
46#define YEARS_TENS (0xf<<20) /* years tens position */
47#define YEARS_HUNDREDS (0xf<<24) /* years hundreds position */
48#define YEARS_MILLENIUMS (0xf<<28) /* years millennium position */
49
50/* MASK SHIFT TIME_REG & ALARM_TIME_REG*/
51#define SECOND_SHIFT 0x00 /* seconds units */
52#define MINUTE_SHIFT 0x08 /* minutes units position */
53#define HOUR_SHIFT 0x10 /* hours units position */
54#define MDAY_SHIFT 0x00 /* Month day shift */
55#define MONTH_SHIFT 0x08 /* Month shift */
56#define YEAR_SHIFT 0x10 /* Year shift */
57
58#define SECOND_MASK 0x7F
59#define MIN_MASK 0x7F
60#define HOUR_MASK 0x3F
61#define DAY_MASK 0x3F
62#define MONTH_MASK 0x7F
63#define YEAR_MASK 0xFFFF
64
65/* date reg equal to time reg, for debug only */
66#define TIME_BYP (1<<9)
67#define INT_ENABLE (1<<31) /* interrupt enable */
68
69/* STATUS_REG */
70#define CLK_UNCONNECTED (1<<0)
71#define PEND_WR_TIME (1<<2)
72#define PEND_WR_DATE (1<<3)
73#define LOST_WR_TIME (1<<4)
74#define LOST_WR_DATE (1<<5)
75#define RTC_INT_MASK (1<<31)
76#define STATUS_BUSY (PEND_WR_TIME | PEND_WR_DATE)
77#define STATUS_FAIL (LOST_WR_TIME | LOST_WR_DATE)
78
79struct spear_rtc_config {
80 struct clk *clk;
81 spinlock_t lock;
82 void __iomem *ioaddr;
83};
84
85static inline void spear_rtc_clear_interrupt(struct spear_rtc_config *config)
86{
87 unsigned int val;
88 unsigned long flags;
89
90 spin_lock_irqsave(&config->lock, flags);
91 val = readl(config->ioaddr + STATUS_REG);
92 val |= RTC_INT_MASK;
93 writel(val, config->ioaddr + STATUS_REG);
94 spin_unlock_irqrestore(&config->lock, flags);
95}
96
97static inline void spear_rtc_enable_interrupt(struct spear_rtc_config *config)
98{
99 unsigned int val;
100
101 val = readl(config->ioaddr + CTRL_REG);
102 if (!(val & INT_ENABLE)) {
103 spear_rtc_clear_interrupt(config);
104 val |= INT_ENABLE;
105 writel(val, config->ioaddr + CTRL_REG);
106 }
107}
108
109static inline void spear_rtc_disable_interrupt(struct spear_rtc_config *config)
110{
111 unsigned int val;
112
113 val = readl(config->ioaddr + CTRL_REG);
114 if (val & INT_ENABLE) {
115 val &= ~INT_ENABLE;
116 writel(val, config->ioaddr + CTRL_REG);
117 }
118}
119
120static inline int is_write_complete(struct spear_rtc_config *config)
121{
122 int ret = 0;
123 unsigned long flags;
124
125 spin_lock_irqsave(&config->lock, flags);
126 if ((readl(config->ioaddr + STATUS_REG)) & STATUS_FAIL)
127 ret = -EIO;
128 spin_unlock_irqrestore(&config->lock, flags);
129
130 return ret;
131}
132
133static void rtc_wait_not_busy(struct spear_rtc_config *config)
134{
135 int status, count = 0;
136 unsigned long flags;
137
138	/* Assume BUSY may stay active for up to 80 msec */
139 for (count = 0; count < 80; count++) {
140 spin_lock_irqsave(&config->lock, flags);
141 status = readl(config->ioaddr + STATUS_REG);
142 spin_unlock_irqrestore(&config->lock, flags);
143 if ((status & STATUS_BUSY) == 0)
144 break;
145 /* check status busy, after each msec */
146 msleep(1);
147 }
148}
149
150static irqreturn_t spear_rtc_irq(int irq, void *dev_id)
151{
152 struct rtc_device *rtc = (struct rtc_device *)dev_id;
153 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
154 unsigned long flags, events = 0;
155 unsigned int irq_data;
156
157 spin_lock_irqsave(&config->lock, flags);
158 irq_data = readl(config->ioaddr + STATUS_REG);
159 spin_unlock_irqrestore(&config->lock, flags);
160
161 if ((irq_data & RTC_INT_MASK)) {
162 spear_rtc_clear_interrupt(config);
163 events = RTC_IRQF | RTC_AF;
164 rtc_update_irq(rtc, 1, events);
165 return IRQ_HANDLED;
166 } else
167 return IRQ_NONE;
168
169}
170
171static int tm2bcd(struct rtc_time *tm)
172{
173 if (rtc_valid_tm(tm) != 0)
174 return -EINVAL;
175 tm->tm_sec = bin2bcd(tm->tm_sec);
176 tm->tm_min = bin2bcd(tm->tm_min);
177 tm->tm_hour = bin2bcd(tm->tm_hour);
178 tm->tm_mday = bin2bcd(tm->tm_mday);
179 tm->tm_mon = bin2bcd(tm->tm_mon + 1);
180 tm->tm_year = bin2bcd(tm->tm_year);
181
182 return 0;
183}
184
185static void bcd2tm(struct rtc_time *tm)
186{
187 tm->tm_sec = bcd2bin(tm->tm_sec);
188 tm->tm_min = bcd2bin(tm->tm_min);
189 tm->tm_hour = bcd2bin(tm->tm_hour);
190 tm->tm_mday = bcd2bin(tm->tm_mday);
191 tm->tm_mon = bcd2bin(tm->tm_mon) - 1;
192 /* epoch == 1900 */
193 tm->tm_year = bcd2bin(tm->tm_year);
194}
195
196/*
197 * spear_rtc_read_time - read the time
198 * @dev: rtc device in use
199 * @tm: holds date and time
200 *
201 * This function reads the current time and date. It returns 0 on success
202 * and a negative error code on failure.
203 */
204static int spear_rtc_read_time(struct device *dev, struct rtc_time *tm)
205{
206 struct platform_device *pdev = to_platform_device(dev);
207 struct rtc_device *rtc = platform_get_drvdata(pdev);
208 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
209 unsigned int time, date;
210
211 /* we don't report wday/yday/isdst ... */
212 rtc_wait_not_busy(config);
213
214 time = readl(config->ioaddr + TIME_REG);
215 date = readl(config->ioaddr + DATE_REG);
216 tm->tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
217 tm->tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
218 tm->tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
219 tm->tm_mday = (date >> MDAY_SHIFT) & DAY_MASK;
220 tm->tm_mon = (date >> MONTH_SHIFT) & MONTH_MASK;
221 tm->tm_year = (date >> YEAR_SHIFT) & YEAR_MASK;
222
223 bcd2tm(tm);
224 return 0;
225}
226
227/*
228 * spear_rtc_set_time - set the time
229 * @dev: rtc device in use
230 * @tm: holds date and time
231 *
232 * This function sets the time and date. It returns 0 on success
233 * and a negative error code on failure.
234 */
235static int spear_rtc_set_time(struct device *dev, struct rtc_time *tm)
236{
237 struct platform_device *pdev = to_platform_device(dev);
238 struct rtc_device *rtc = platform_get_drvdata(pdev);
239 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
240	int time, date, err = 0;
241
242 if (tm2bcd(tm) < 0)
243 return -EINVAL;
244
245 rtc_wait_not_busy(config);
246 time = (tm->tm_sec << SECOND_SHIFT) | (tm->tm_min << MINUTE_SHIFT) |
247 (tm->tm_hour << HOUR_SHIFT);
248 date = (tm->tm_mday << MDAY_SHIFT) | (tm->tm_mon << MONTH_SHIFT) |
249 (tm->tm_year << YEAR_SHIFT);
250 writel(time, config->ioaddr + TIME_REG);
251 writel(date, config->ioaddr + DATE_REG);
252 err = is_write_complete(config);
253 if (err < 0)
254 return err;
255
256 return 0;
257}
258
259/*
260 * spear_rtc_read_alarm - read the alarm time
261 * @dev: rtc device in use
262 * @alm: holds alarm date and time
263 *
264 * This function reads the alarm time and date. It returns 0 on success
265 * and a negative error code on failure.
266 */
267static int spear_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
268{
269 struct platform_device *pdev = to_platform_device(dev);
270 struct rtc_device *rtc = platform_get_drvdata(pdev);
271 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
272 unsigned int time, date;
273
274 rtc_wait_not_busy(config);
275
276 time = readl(config->ioaddr + ALARM_TIME_REG);
277 date = readl(config->ioaddr + ALARM_DATE_REG);
278 alm->time.tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
279 alm->time.tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
280 alm->time.tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
281 alm->time.tm_mday = (date >> MDAY_SHIFT) & DAY_MASK;
282 alm->time.tm_mon = (date >> MONTH_SHIFT) & MONTH_MASK;
283 alm->time.tm_year = (date >> YEAR_SHIFT) & YEAR_MASK;
284
285 bcd2tm(&alm->time);
286 alm->enabled = readl(config->ioaddr + CTRL_REG) & INT_ENABLE;
287
288 return 0;
289}
290
291/*
292 * spear_rtc_set_alarm - set the alarm time
293 * @dev: rtc device in use
294 * @alm: holds alarm date and time
295 *
296 * This function sets the alarm time and date. It returns 0 on success
297 * and a negative error code on failure.
298 */
299static int spear_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
300{
301 struct platform_device *pdev = to_platform_device(dev);
302 struct rtc_device *rtc = platform_get_drvdata(pdev);
303 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
304	int time, date, err = 0;
305
306 if (tm2bcd(&alm->time) < 0)
307 return -EINVAL;
308
309 rtc_wait_not_busy(config);
310
311 time = (alm->time.tm_sec << SECOND_SHIFT) | (alm->time.tm_min <<
312 MINUTE_SHIFT) | (alm->time.tm_hour << HOUR_SHIFT);
313 date = (alm->time.tm_mday << MDAY_SHIFT) | (alm->time.tm_mon <<
314 MONTH_SHIFT) | (alm->time.tm_year << YEAR_SHIFT);
315
316 writel(time, config->ioaddr + ALARM_TIME_REG);
317 writel(date, config->ioaddr + ALARM_DATE_REG);
318 err = is_write_complete(config);
319 if (err < 0)
320 return err;
321
322 if (alm->enabled)
323 spear_rtc_enable_interrupt(config);
324 else
325 spear_rtc_disable_interrupt(config);
326
327 return 0;
328}
329static struct rtc_class_ops spear_rtc_ops = {
330 .read_time = spear_rtc_read_time,
331 .set_time = spear_rtc_set_time,
332 .read_alarm = spear_rtc_read_alarm,
333 .set_alarm = spear_rtc_set_alarm,
334};
335
336static int __devinit spear_rtc_probe(struct platform_device *pdev)
337{
338 struct resource *res;
339 struct rtc_device *rtc;
340 struct spear_rtc_config *config;
341	int status = 0;
342 int irq;
343
344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
345 if (!res) {
346 dev_err(&pdev->dev, "no resource defined\n");
347 return -EBUSY;
348 }
349 if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
350 dev_err(&pdev->dev, "rtc region already claimed\n");
351 return -EBUSY;
352 }
353
354 config = kzalloc(sizeof(*config), GFP_KERNEL);
355 if (!config) {
356 dev_err(&pdev->dev, "out of memory\n");
357 status = -ENOMEM;
358 goto err_release_region;
359 }
360
361 config->clk = clk_get(&pdev->dev, NULL);
362 if (IS_ERR(config->clk)) {
363 status = PTR_ERR(config->clk);
364 goto err_kfree;
365 }
366
367 status = clk_enable(config->clk);
368 if (status < 0)
369 goto err_clk_put;
370
371 config->ioaddr = ioremap(res->start, resource_size(res));
372 if (!config->ioaddr) {
373 dev_err(&pdev->dev, "ioremap fail\n");
374 status = -ENOMEM;
375 goto err_disable_clock;
376 }
377
378 spin_lock_init(&config->lock);
379
380 rtc = rtc_device_register(pdev->name, &pdev->dev, &spear_rtc_ops,
381 THIS_MODULE);
382 if (IS_ERR(rtc)) {
383 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
384 PTR_ERR(rtc));
385 status = PTR_ERR(rtc);
386 goto err_iounmap;
387 }
388
389 platform_set_drvdata(pdev, rtc);
390 dev_set_drvdata(&rtc->dev, config);
391
392 /* alarm irqs */
393 irq = platform_get_irq(pdev, 0);
394 if (irq < 0) {
395	dev_err(&pdev->dev, "no alarm irq defined\n");
396 status = irq;
397 goto err_clear_platdata;
398 }
399
400 status = request_irq(irq, spear_rtc_irq, 0, pdev->name, rtc);
401 if (status) {
402	dev_err(&pdev->dev, "Alarm interrupt IRQ%d already "
403		"claimed\n", irq);
404 goto err_clear_platdata;
405 }
406
407 if (!device_can_wakeup(&pdev->dev))
408 device_init_wakeup(&pdev->dev, 1);
409
410 return 0;
411
412err_clear_platdata:
413 platform_set_drvdata(pdev, NULL);
414 dev_set_drvdata(&rtc->dev, NULL);
415 rtc_device_unregister(rtc);
416err_iounmap:
417 iounmap(config->ioaddr);
418err_disable_clock:
419 clk_disable(config->clk);
420err_clk_put:
421 clk_put(config->clk);
422err_kfree:
423 kfree(config);
424err_release_region:
425 release_mem_region(res->start, resource_size(res));
426
427 return status;
428}
429
430static int __devexit spear_rtc_remove(struct platform_device *pdev)
431{
432 struct rtc_device *rtc = platform_get_drvdata(pdev);
433 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
434 int irq;
435 struct resource *res;
436
437 /* leave rtc running, but disable irqs */
438 spear_rtc_disable_interrupt(config);
439 device_init_wakeup(&pdev->dev, 0);
440 irq = platform_get_irq(pdev, 0);
441	if (irq >= 0)
442		free_irq(irq, rtc);
443 clk_disable(config->clk);
444 clk_put(config->clk);
445 iounmap(config->ioaddr);
446 kfree(config);
447 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
448 if (res)
449 release_mem_region(res->start, resource_size(res));
450 platform_set_drvdata(pdev, NULL);
451 dev_set_drvdata(&rtc->dev, NULL);
452 rtc_device_unregister(rtc);
453
454 return 0;
455}
456
457#ifdef CONFIG_PM
458
459static int spear_rtc_suspend(struct platform_device *pdev, pm_message_t state)
460{
461 struct rtc_device *rtc = platform_get_drvdata(pdev);
462 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
463 int irq;
464
465 irq = platform_get_irq(pdev, 0);
466 if (device_may_wakeup(&pdev->dev))
467 enable_irq_wake(irq);
468 else {
469 spear_rtc_disable_interrupt(config);
470 clk_disable(config->clk);
471 }
472
473 return 0;
474}
475
476static int spear_rtc_resume(struct platform_device *pdev)
477{
478 struct rtc_device *rtc = platform_get_drvdata(pdev);
479 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
480 int irq;
481
482 irq = platform_get_irq(pdev, 0);
483
484 if (device_may_wakeup(&pdev->dev))
485 disable_irq_wake(irq);
486 else {
487 clk_enable(config->clk);
488 spear_rtc_enable_interrupt(config);
489 }
490
491 return 0;
492}
493
494#else
495#define spear_rtc_suspend NULL
496#define spear_rtc_resume NULL
497#endif
498
499static void spear_rtc_shutdown(struct platform_device *pdev)
500{
501 struct rtc_device *rtc = platform_get_drvdata(pdev);
502 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
503
504 spear_rtc_disable_interrupt(config);
505 clk_disable(config->clk);
506}
507
508static struct platform_driver spear_rtc_driver = {
509 .probe = spear_rtc_probe,
510 .remove = __devexit_p(spear_rtc_remove),
511 .suspend = spear_rtc_suspend,
512 .resume = spear_rtc_resume,
513 .shutdown = spear_rtc_shutdown,
514 .driver = {
515 .name = "rtc-spear",
516 },
517};
518
519static int __init rtc_init(void)
520{
521 return platform_driver_register(&spear_rtc_driver);
522}
523module_init(rtc_init);
524
525static void __exit rtc_exit(void)
526{
527 platform_driver_unregister(&spear_rtc_driver);
528}
529module_exit(rtc_exit);
530
531MODULE_ALIAS("platform:rtc-spear");
532MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
533MODULE_DESCRIPTION("ST SPEAr Realtime Clock Driver (RTC)");
534MODULE_LICENSE("GPL");
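
The TIME_REG layout defined at the top of this file packs each time field as two BCD digits at a fixed shift, and spear_rtc_set_time() simply ORs the shifted fields together. A standalone sketch of that packing, with bin2bcd() reimplemented so it builds outside the kernel, is shown below; the sample time 13:30:45 is arbitrary.

/* Illustration only: the packing done by spear_rtc_set_time(). */
#include <stdio.h>

#define SECOND_SHIFT	0x00
#define MINUTE_SHIFT	0x08
#define HOUR_SHIFT	0x10

static unsigned int bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	unsigned int sec = 45, min = 30, hour = 13;
	unsigned int time = (bin2bcd(sec) << SECOND_SHIFT) |
			    (bin2bcd(min) << MINUTE_SHIFT) |
			    (bin2bcd(hour) << HOUR_SHIFT);

	/* 13:30:45 -> 0x00133045: each field is two BCD digits */
	printf("TIME_REG = 0x%08x\n", time);
	return 0;
}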
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
new file mode 100644
index 000000000000..b8bc862903ae
--- /dev/null
+++ b/drivers/rtc/rtc-vt8500.c
@@ -0,0 +1,366 @@
1/*
2 * drivers/rtc/rtc-vt8500.c
3 *
4 * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
5 *
6 * Based on rtc-pxa.c
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/module.h>
19#include <linux/rtc.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/bcd.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26
27/*
28 * Register definitions
29 */
30#define VT8500_RTC_TS 0x00 /* Time set */
31#define VT8500_RTC_DS 0x04 /* Date set */
32#define VT8500_RTC_AS 0x08 /* Alarm set */
33#define VT8500_RTC_CR 0x0c /* Control */
34#define VT8500_RTC_TR 0x10 /* Time read */
35#define VT8500_RTC_DR 0x14 /* Date read */
36#define VT8500_RTC_WS 0x18 /* Write status */
37#define VT8500_RTC_CL 0x20 /* Calibration */
38#define VT8500_RTC_IS 0x24 /* Interrupt status */
39#define VT8500_RTC_ST 0x28 /* Status */
40
41#define INVALID_TIME_BIT (1 << 31)
42
43#define DATE_CENTURY_S 19
44#define DATE_YEAR_S 11
45#define DATE_YEAR_MASK (0xff << DATE_YEAR_S)
46#define DATE_MONTH_S 6
47#define DATE_MONTH_MASK (0x1f << DATE_MONTH_S)
48#define DATE_DAY_MASK 0x3f
49
50#define TIME_DOW_S 20
51#define TIME_DOW_MASK (0x07 << TIME_DOW_S)
52#define TIME_HOUR_S 14
53#define TIME_HOUR_MASK (0x3f << TIME_HOUR_S)
54#define TIME_MIN_S 7
55#define TIME_MIN_MASK (0x7f << TIME_MIN_S)
56#define TIME_SEC_MASK 0x7f
57
58#define ALARM_DAY_S 20
59#define ALARM_DAY_MASK (0x3f << ALARM_DAY_S)
60
61#define ALARM_DAY_BIT (1 << 29)
62#define ALARM_HOUR_BIT (1 << 28)
63#define ALARM_MIN_BIT (1 << 27)
64#define ALARM_SEC_BIT (1 << 26)
65
66#define ALARM_ENABLE_MASK (ALARM_DAY_BIT \
67 | ALARM_HOUR_BIT \
68 | ALARM_MIN_BIT \
69 | ALARM_SEC_BIT)
70
71#define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */
72#define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */
73#define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */
74#define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */
75#define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */
76
77struct vt8500_rtc {
78 void __iomem *regbase;
79 struct resource *res;
80 int irq_alarm;
81 int irq_hz;
82 struct rtc_device *rtc;
83 spinlock_t lock; /* Protects this structure */
84};
85
86static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id)
87{
88 struct vt8500_rtc *vt8500_rtc = dev_id;
89 u32 isr;
90 unsigned long events = 0;
91
92 spin_lock(&vt8500_rtc->lock);
93
94 /* clear interrupt sources */
95 isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS);
96 writel(isr, vt8500_rtc->regbase + VT8500_RTC_IS);
97
98 spin_unlock(&vt8500_rtc->lock);
99
100 if (isr & 1)
101 events |= RTC_AF | RTC_IRQF;
102
103 /* Only second/minute interrupts are supported */
104 if (isr & 2)
105 events |= RTC_UF | RTC_IRQF;
106
107 rtc_update_irq(vt8500_rtc->rtc, 1, events);
108
109 return IRQ_HANDLED;
110}
111
112static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
113{
114 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
115 u32 date, time;
116
117 date = readl(vt8500_rtc->regbase + VT8500_RTC_DR);
118 time = readl(vt8500_rtc->regbase + VT8500_RTC_TR);
119
120 tm->tm_sec = bcd2bin(time & TIME_SEC_MASK);
121 tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S);
122 tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S);
123 tm->tm_mday = bcd2bin(date & DATE_DAY_MASK);
124 tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S);
125 tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S)
126 + ((date >> DATE_CENTURY_S) & 1 ? 200 : 100);
127 tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S;
128
129 return 0;
130}
131
132static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
133{
134 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
135
136 if (tm->tm_year < 100) {
137 dev_warn(dev, "Only years 2000-2199 are supported by the "
138 "hardware!\n");
139 return -EINVAL;
140 }
141
142 writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S)
143 | (bin2bcd(tm->tm_mon) << DATE_MONTH_S)
144 | (bin2bcd(tm->tm_mday)),
145 vt8500_rtc->regbase + VT8500_RTC_DS);
146 writel((bin2bcd(tm->tm_wday) << TIME_DOW_S)
147 | (bin2bcd(tm->tm_hour) << TIME_HOUR_S)
148 | (bin2bcd(tm->tm_min) << TIME_MIN_S)
149 | (bin2bcd(tm->tm_sec)),
150 vt8500_rtc->regbase + VT8500_RTC_TS);
151
152 return 0;
153}
154
155static int vt8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
156{
157 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
158 u32 isr, alarm;
159
160 alarm = readl(vt8500_rtc->regbase + VT8500_RTC_AS);
161 isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS);
162
163 alrm->time.tm_mday = bcd2bin((alarm & ALARM_DAY_MASK) >> ALARM_DAY_S);
164 alrm->time.tm_hour = bcd2bin((alarm & TIME_HOUR_MASK) >> TIME_HOUR_S);
165 alrm->time.tm_min = bcd2bin((alarm & TIME_MIN_MASK) >> TIME_MIN_S);
166 alrm->time.tm_sec = bcd2bin((alarm & TIME_SEC_MASK));
167
168 alrm->enabled = (alarm & ALARM_ENABLE_MASK) ? 1 : 0;
169
170 alrm->pending = (isr & 1) ? 1 : 0;
171 return rtc_valid_tm(&alrm->time);
172}
173
174static int vt8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
175{
176 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
177
178 writel((alrm->enabled ? ALARM_ENABLE_MASK : 0)
179 | (bin2bcd(alrm->time.tm_mday) << ALARM_DAY_S)
180 | (bin2bcd(alrm->time.tm_hour) << TIME_HOUR_S)
181 | (bin2bcd(alrm->time.tm_min) << TIME_MIN_S)
182 | (bin2bcd(alrm->time.tm_sec)),
183 vt8500_rtc->regbase + VT8500_RTC_AS);
184
185 return 0;
186}
187
188static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled)
189{
190 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
191 unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_AS);
192
193 if (enabled)
194 tmp |= ALARM_ENABLE_MASK;
195 else
196 tmp &= ~ALARM_ENABLE_MASK;
197
198 writel(tmp, vt8500_rtc->regbase + VT8500_RTC_AS);
199 return 0;
200}
201
202static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled)
203{
204 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
205 unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR);
206
207 if (enabled)
208 tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE;
209 else
210 tmp &= ~VT8500_RTC_CR_SM_ENABLE;
211
212 writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR);
213 return 0;
214}
215
216static const struct rtc_class_ops vt8500_rtc_ops = {
217 .read_time = vt8500_rtc_read_time,
218 .set_time = vt8500_rtc_set_time,
219 .read_alarm = vt8500_rtc_read_alarm,
220 .set_alarm = vt8500_rtc_set_alarm,
221 .alarm_irq_enable = vt8500_alarm_irq_enable,
222 .update_irq_enable = vt8500_update_irq_enable,
223};
224
225static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
226{
227 struct vt8500_rtc *vt8500_rtc;
228 int ret;
229
230 vt8500_rtc = kzalloc(sizeof(struct vt8500_rtc), GFP_KERNEL);
231 if (!vt8500_rtc)
232 return -ENOMEM;
233
234 spin_lock_init(&vt8500_rtc->lock);
235 platform_set_drvdata(pdev, vt8500_rtc);
236
237 vt8500_rtc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
238 if (!vt8500_rtc->res) {
239 dev_err(&pdev->dev, "No I/O memory resource defined\n");
240 ret = -ENXIO;
241 goto err_free;
242 }
243
244 vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0);
245 if (vt8500_rtc->irq_alarm < 0) {
246 dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
247 ret = -ENXIO;
248 goto err_free;
249 }
250
251 vt8500_rtc->irq_hz = platform_get_irq(pdev, 1);
252 if (vt8500_rtc->irq_hz < 0) {
253 dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
254 ret = -ENXIO;
255 goto err_free;
256 }
257
258 vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
259 resource_size(vt8500_rtc->res),
260 "vt8500-rtc");
261 if (vt8500_rtc->res == NULL) {
262 dev_err(&pdev->dev, "failed to request I/O memory\n");
263 ret = -EBUSY;
264 goto err_free;
265 }
266
267 vt8500_rtc->regbase = ioremap(vt8500_rtc->res->start,
268 resource_size(vt8500_rtc->res));
269 if (!vt8500_rtc->regbase) {
270 dev_err(&pdev->dev, "Unable to map RTC I/O memory\n");
271 ret = -EBUSY;
272 goto err_release;
273 }
274
275 /* Enable the second/minute interrupt generation and enable RTC */
276 writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H
277 | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC,
278 vt8500_rtc->regbase + VT8500_RTC_CR);
279
280 vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
281 &vt8500_rtc_ops, THIS_MODULE);
282 if (IS_ERR(vt8500_rtc->rtc)) {
283 ret = PTR_ERR(vt8500_rtc->rtc);
284 dev_err(&pdev->dev,
285 "Failed to register RTC device -> %d\n", ret);
286 goto err_unmap;
287 }
288
289 ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0,
290 "rtc 1Hz", vt8500_rtc);
291 if (ret < 0) {
292 dev_err(&pdev->dev, "can't get irq %i, err %d\n",
293 vt8500_rtc->irq_hz, ret);
294 goto err_unreg;
295 }
296
297 ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
298 "rtc alarm", vt8500_rtc);
299 if (ret < 0) {
300 dev_err(&pdev->dev, "can't get irq %i, err %d\n",
301 vt8500_rtc->irq_alarm, ret);
302 goto err_free_hz;
303 }
304
305 return 0;
306
307err_free_hz:
308 free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
309err_unreg:
310 rtc_device_unregister(vt8500_rtc->rtc);
311err_unmap:
312 iounmap(vt8500_rtc->regbase);
313err_release:
314 release_mem_region(vt8500_rtc->res->start,
315 resource_size(vt8500_rtc->res));
316err_free:
317 kfree(vt8500_rtc);
318 return ret;
319}
320
321static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
322{
323 struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
324
325 free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
326 free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
327
328 rtc_device_unregister(vt8500_rtc->rtc);
329
330 /* Disable alarm matching */
331 writel(0, vt8500_rtc->regbase + VT8500_RTC_IS);
332 iounmap(vt8500_rtc->regbase);
333 release_mem_region(vt8500_rtc->res->start,
334 resource_size(vt8500_rtc->res));
335
336 kfree(vt8500_rtc);
337 platform_set_drvdata(pdev, NULL);
338
339 return 0;
340}
341
342static struct platform_driver vt8500_rtc_driver = {
343 .probe = vt8500_rtc_probe,
344 .remove = __devexit_p(vt8500_rtc_remove),
345 .driver = {
346 .name = "vt8500-rtc",
347 .owner = THIS_MODULE,
348 },
349};
350
351static int __init vt8500_rtc_init(void)
352{
353 return platform_driver_register(&vt8500_rtc_driver);
354}
355module_init(vt8500_rtc_init);
356
357static void __exit vt8500_rtc_exit(void)
358{
359 platform_driver_unregister(&vt8500_rtc_driver);
360}
361module_exit(vt8500_rtc_exit);
362
363MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
364MODULE_DESCRIPTION("VIA VT8500 SoC Realtime Clock Driver (RTC)");
365MODULE_LICENSE("GPL");
366MODULE_ALIAS("platform:vt8500-rtc");
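
The year handling in vt8500_rtc_read_time() combines two BCD year digits with a single century bit at DATE_CENTURY_S, which is why the driver covers 2000-2199. The following standalone sketch, with bcd2bin() reimplemented for userspace, walks through one decode; the register value is a made-up example.

/* Illustration of the DR-register year decode: century=0, year=0x11 -> 2011. */
#include <stdio.h>

#define DATE_CENTURY_S	19
#define DATE_YEAR_S	11
#define DATE_YEAR_MASK	(0xff << DATE_YEAR_S)

static unsigned int bcd2bin(unsigned int val)
{
	return ((val >> 4) * 10) + (val & 0x0f);
}

int main(void)
{
	unsigned int date = 0x11 << DATE_YEAR_S;	/* year digits "11", century bit clear */
	int tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S)
			+ (((date >> DATE_CENTURY_S) & 1) ? 200 : 100);

	printf("tm_year = %d (i.e. %d)\n", tm_year, tm_year + 1900);	/* 111 -> 2011 */
	return 0;
}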
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 85dddb1e4126..46784b83c5c4 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,7 +24,7 @@
24#include <asm/debug.h> 24#include <asm/debug.h>
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/s390_ext.h> 27#include <asm/irq.h>
28#include <asm/vtoc.h> 28#include <asm/vtoc.h>
29#include <asm/diag.h> 29#include <asm/diag.h>
30 30
@@ -642,7 +642,7 @@ dasd_diag_init(void)
642 } 642 }
643 ASCEBC(dasd_diag_discipline.ebcname, 4); 643 ASCEBC(dasd_diag_discipline.ebcname, 4);
644 644
645 ctl_set_bit(0, 9); 645 service_subclass_irq_register();
646 register_external_interrupt(0x2603, dasd_ext_handler); 646 register_external_interrupt(0x2603, dasd_ext_handler);
647 dasd_diag_discipline_pointer = &dasd_diag_discipline; 647 dasd_diag_discipline_pointer = &dasd_diag_discipline;
648 return 0; 648 return 0;
@@ -652,7 +652,7 @@ static void __exit
652dasd_diag_cleanup(void) 652dasd_diag_cleanup(void)
653{ 653{
654 unregister_external_interrupt(0x2603, dasd_ext_handler); 654 unregister_external_interrupt(0x2603, dasd_ext_handler);
655 ctl_clear_bit(0, 9); 655 service_subclass_irq_unregister();
656 dasd_diag_discipline_pointer = NULL; 656 dasd_diag_discipline_pointer = NULL;
657} 657}
658 658
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index b76c61f82485..eaa7e78186f9 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -19,7 +19,6 @@
19#include <linux/suspend.h> 19#include <linux/suspend.h>
20#include <linux/completion.h> 20#include <linux/completion.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <asm/s390_ext.h>
23#include <asm/types.h> 22#include <asm/types.h>
24#include <asm/irq.h> 23#include <asm/irq.h>
25 24
@@ -885,12 +884,12 @@ sclp_check_interface(void)
885 spin_unlock_irqrestore(&sclp_lock, flags); 884 spin_unlock_irqrestore(&sclp_lock, flags);
886 /* Enable service-signal interruption - needs to happen 885 /* Enable service-signal interruption - needs to happen
887 * with IRQs enabled. */ 886 * with IRQs enabled. */
888 ctl_set_bit(0, 9); 887 service_subclass_irq_register();
889 /* Wait for signal from interrupt or timeout */ 888 /* Wait for signal from interrupt or timeout */
890 sclp_sync_wait(); 889 sclp_sync_wait();
891 /* Disable service-signal interruption - needs to happen 890 /* Disable service-signal interruption - needs to happen
892 * with IRQs enabled. */ 891 * with IRQs enabled. */
893 ctl_clear_bit(0,9); 892 service_subclass_irq_unregister();
894 spin_lock_irqsave(&sclp_lock, flags); 893 spin_lock_irqsave(&sclp_lock, flags);
895 del_timer(&sclp_request_timer); 894 del_timer(&sclp_request_timer);
896 if (sclp_init_req.status == SCLP_REQ_DONE && 895 if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -1070,7 +1069,7 @@ sclp_init(void)
1070 spin_unlock_irqrestore(&sclp_lock, flags); 1069 spin_unlock_irqrestore(&sclp_lock, flags);
1071 /* Enable service-signal external interruption - needs to happen with 1070 /* Enable service-signal external interruption - needs to happen with
1072 * IRQs enabled. */ 1071 * IRQs enabled. */
1073 ctl_set_bit(0, 9); 1072 service_subclass_irq_register();
1074 sclp_init_mask(1); 1073 sclp_init_mask(1);
1075 return 0; 1074 return 0;
1076 1075
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 607998f0b7d8..aec60d55b10d 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -25,7 +25,6 @@
25#include <asm/kvm_para.h> 25#include <asm/kvm_para.h>
26#include <asm/kvm_virtio.h> 26#include <asm/kvm_virtio.h>
27#include <asm/setup.h> 27#include <asm/setup.h>
28#include <asm/s390_ext.h>
29#include <asm/irq.h> 28#include <asm/irq.h>
30 29
31#define VIRTIO_SUBCODE_64 0x0D00 30#define VIRTIO_SUBCODE_64 0x0D00
@@ -441,7 +440,7 @@ static int __init kvm_devices_init(void)
441 440
442 INIT_WORK(&hotplug_work, hotplug_devices); 441 INIT_WORK(&hotplug_work, hotplug_devices);
443 442
444 ctl_set_bit(0, 9); 443 service_subclass_irq_register();
445 register_external_interrupt(0x2603, kvm_extint_handler); 444 register_external_interrupt(0x2603, kvm_extint_handler);
446 445
447 scan_devices(); 446 scan_devices();
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4ff26521d75f..3382475dc22d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -59,7 +59,6 @@
59#ifndef AAC_DRIVER_BRANCH 59#ifndef AAC_DRIVER_BRANCH
60#define AAC_DRIVER_BRANCH "" 60#define AAC_DRIVER_BRANCH ""
61#endif 61#endif
62#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__
63#define AAC_DRIVERNAME "aacraid" 62#define AAC_DRIVERNAME "aacraid"
64 63
65#ifdef AAC_DRIVER_BUILD 64#ifdef AAC_DRIVER_BUILD
@@ -67,7 +66,7 @@
67#define str(x) _str(x) 66#define str(x) _str(x)
68#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH 67#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
69#else 68#else
70#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH " " AAC_DRIVER_BUILD_DATE 69#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
71#endif 70#endif
72 71
73MODULE_AUTHOR("Red Hat Inc and Adaptec"); 72MODULE_AUTHOR("Red Hat Inc and Adaptec");
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 3b7e83d2dab4..d5ff142c93a2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
486 flash_error_table[i].reason); 486 flash_error_table[i].reason);
487} 487}
488 488
489static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO, 489static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
490 asd_show_update_bios, asd_store_update_bios); 490 asd_show_update_bios, asd_store_update_bios);
491 491
492static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) 492static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index c1f72c49196f..6c7e0339dda4 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -56,6 +56,8 @@ BFA_TRC_FILE(CNA, IOC);
56#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 56#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
57#define bfa_ioc_notify_fail(__ioc) \ 57#define bfa_ioc_notify_fail(__ioc) \
58 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) 58 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
59#define bfa_ioc_sync_start(__ioc) \
60 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
59#define bfa_ioc_sync_join(__ioc) \ 61#define bfa_ioc_sync_join(__ioc) \
60 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) 62 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
61#define bfa_ioc_sync_leave(__ioc) \ 63#define bfa_ioc_sync_leave(__ioc) \
@@ -647,7 +649,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
647 switch (event) { 649 switch (event) {
648 case IOCPF_E_SEMLOCKED: 650 case IOCPF_E_SEMLOCKED:
649 if (bfa_ioc_firmware_lock(ioc)) { 651 if (bfa_ioc_firmware_lock(ioc)) {
650 if (bfa_ioc_sync_complete(ioc)) { 652 if (bfa_ioc_sync_start(ioc)) {
651 iocpf->retry_count = 0; 653 iocpf->retry_count = 0;
652 bfa_ioc_sync_join(ioc); 654 bfa_ioc_sync_join(ioc);
653 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 655 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index ec9cf08b0e7f..c85182a704fb 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -263,6 +263,7 @@ struct bfa_ioc_hwif_s {
263 bfa_boolean_t msix); 263 bfa_boolean_t msix);
264 void (*ioc_notify_fail) (struct bfa_ioc_s *ioc); 264 void (*ioc_notify_fail) (struct bfa_ioc_s *ioc);
265 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); 265 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
266 bfa_boolean_t (*ioc_sync_start) (struct bfa_ioc_s *ioc);
266 void (*ioc_sync_join) (struct bfa_ioc_s *ioc); 267 void (*ioc_sync_join) (struct bfa_ioc_s *ioc);
267 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc); 268 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
268 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc); 269 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index e4a0713185b6..89ae4c8f95a2 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -32,6 +32,7 @@ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
32static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); 32static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
33static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc); 33static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
34static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); 34static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
35static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
35static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc); 36static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
36static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc); 37static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
37static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc); 38static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
@@ -53,6 +54,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
53 hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set; 54 hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
54 hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail; 55 hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
55 hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset; 56 hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
57 hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
56 hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join; 58 hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
57 hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave; 59 hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
58 hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack; 60 hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
@@ -195,6 +197,15 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
195} 197}
196 198
197/* 199/*
200 * Synchronized IOC failure processing routines
201 */
202static bfa_boolean_t
203bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
204{
205 return bfa_ioc_cb_sync_complete(ioc);
206}
207
208/*
198 * Cleanup hw semaphore and usecnt registers 209 * Cleanup hw semaphore and usecnt registers
199 */ 210 */
200static void 211static void
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 008d129ddfcd..93612520f0d2 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); 41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
42static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc); 42static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); 43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
44static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
44static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc); 45static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
45static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc); 46static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
46static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc); 47static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
@@ -62,6 +63,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
62 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 63 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
63 hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; 64 hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
64 hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 65 hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
66 hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
65 hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; 67 hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
66 hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; 68 hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
67 hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; 69 hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -351,6 +353,30 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
351 writel(1, ioc->ioc_regs.ioc_sem_reg); 353 writel(1, ioc->ioc_regs.ioc_sem_reg);
352} 354}
353 355
356static bfa_boolean_t
357bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
358{
359 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
360 uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
361
362 /*
363 * Driver load time. If the sync required bit for this PCI fn
364 * is set, it is due to an unclean exit by the driver for this
365 * PCI fn in the previous incarnation. Whoever comes here first
366 * should clean it up, no matter which PCI fn.
367 */
368
369 if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
370 writel(0, ioc->ioc_regs.ioc_fail_sync);
371 writel(1, ioc->ioc_regs.ioc_usage_reg);
372 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
373 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
374 return BFA_TRUE;
375 }
376
377 return bfa_ioc_ct_sync_complete(ioc);
378}
379
354/* 380/*
355 * Synchronized IOC failure processing routines 381 * Synchronized IOC failure processing routines
356 */ 382 */
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index cfd59023227b..6bdd25a93db9 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -66,11 +66,11 @@
66#define BD_SPLIT_SIZE 32768 66#define BD_SPLIT_SIZE 32768
67 67
68/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */ 68/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
69#define BNX2I_SQ_WQES_MIN 16 69#define BNX2I_SQ_WQES_MIN 16
70#define BNX2I_570X_SQ_WQES_MAX 128 70#define BNX2I_570X_SQ_WQES_MAX 128
71#define BNX2I_5770X_SQ_WQES_MAX 512 71#define BNX2I_5770X_SQ_WQES_MAX 512
72#define BNX2I_570X_SQ_WQES_DEFAULT 128 72#define BNX2I_570X_SQ_WQES_DEFAULT 128
73#define BNX2I_5770X_SQ_WQES_DEFAULT 256 73#define BNX2I_5770X_SQ_WQES_DEFAULT 128
74 74
75#define BNX2I_570X_CQ_WQES_MAX 128 75#define BNX2I_570X_CQ_WQES_MAX 128
76#define BNX2I_5770X_CQ_WQES_MAX 512 76#define BNX2I_5770X_CQ_WQES_MAX 512
@@ -115,6 +115,7 @@
115#define BNX2X_MAX_CQS 8 115#define BNX2X_MAX_CQS 8
116 116
117#define CNIC_ARM_CQE 1 117#define CNIC_ARM_CQE 1
118#define CNIC_ARM_CQE_FP 2
118#define CNIC_DISARM_CQE 0 119#define CNIC_DISARM_CQE 0
119 120
120#define REG_RD(__hba, offset) \ 121#define REG_RD(__hba, offset) \
@@ -666,7 +667,9 @@ enum {
666 * after HBA reset is completed by bnx2i/cnic/bnx2 667 * after HBA reset is completed by bnx2i/cnic/bnx2
667 * modules 668 * modules
668 * @state: tracks offload connection state machine 669 * @state: tracks offload connection state machine
669 * @teardown_mode: indicates if conn teardown is abortive or orderly 670 * @timestamp: tracks the start time when the ep begins to connect
671 * @num_active_cmds: tracks the number of outstanding commands for this ep
672 * @ec_shift: the amount of shift as part of the event coal calc
670 * @qp: QP information 673 * @qp: QP information
671 * @ids: contains chip allocated *context id* & driver assigned 674 * @ids: contains chip allocated *context id* & driver assigned
672 * *iscsi cid* 675 * *iscsi cid*
@@ -685,6 +688,7 @@ struct bnx2i_endpoint {
685 u32 state; 688 u32 state;
686 unsigned long timestamp; 689 unsigned long timestamp;
687 int num_active_cmds; 690 int num_active_cmds;
691 u32 ec_shift;
688 692
689 struct qp_info qp; 693 struct qp_info qp;
690 struct ep_handles ids; 694 struct ep_handles ids;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f0b89513faed..5c54a2d9b834 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -138,7 +138,6 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
138 u16 next_index; 138 u16 next_index;
139 u32 num_active_cmds; 139 u32 num_active_cmds;
140 140
141
142 /* Coalesce CQ entries only on 10G devices */ 141 /* Coalesce CQ entries only on 10G devices */
143 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 142 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
144 return; 143 return;
@@ -148,16 +147,19 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
148 * interrupts and other unwanted results 147 * interrupts and other unwanted results
149 */ 148 */
150 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; 149 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
151 if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
152 return;
153 150
154 if (action == CNIC_ARM_CQE) { 151 if (action != CNIC_ARM_CQE_FP)
152 if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
153 return;
154
155 if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
155 num_active_cmds = ep->num_active_cmds; 156 num_active_cmds = ep->num_active_cmds;
156 if (num_active_cmds <= event_coal_min) 157 if (num_active_cmds <= event_coal_min)
157 next_index = 1; 158 next_index = 1;
158 else 159 else
159 next_index = event_coal_min + 160 next_index = event_coal_min +
160 (num_active_cmds - event_coal_min) / event_coal_div; 161 ((num_active_cmds - event_coal_min) >>
162 ep->ec_shift);
161 if (!next_index) 163 if (!next_index)
162 next_index = 1; 164 next_index = 1;
163 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1; 165 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -1274,6 +1276,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1274 iscsi_init.dummy_buffer_addr_hi = 1276 iscsi_init.dummy_buffer_addr_hi =
1275 (u32) ((u64) hba->dummy_buf_dma >> 32); 1277 (u32) ((u64) hba->dummy_buf_dma >> 32);
1276 1278
1279 hba->num_ccell = hba->max_sqes >> 1;
1277 hba->ctx_ccell_tasks = 1280 hba->ctx_ccell_tasks =
1278 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); 1281 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
1279 iscsi_init.num_ccells_per_conn = hba->num_ccell; 1282 iscsi_init.num_ccells_per_conn = hba->num_ccell;
@@ -1934,7 +1937,6 @@ cqe_out:
1934 qp->cq_cons_idx++; 1937 qp->cq_cons_idx++;
1935 } 1938 }
1936 } 1939 }
1937 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1938} 1940}
1939 1941
1940/** 1942/**
@@ -1948,22 +1950,23 @@ cqe_out:
1948static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, 1950static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
1949 struct iscsi_kcqe *new_cqe_kcqe) 1951 struct iscsi_kcqe *new_cqe_kcqe)
1950{ 1952{
1951 struct bnx2i_conn *conn; 1953 struct bnx2i_conn *bnx2i_conn;
1952 u32 iscsi_cid; 1954 u32 iscsi_cid;
1953 1955
1954 iscsi_cid = new_cqe_kcqe->iscsi_conn_id; 1956 iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
1955 conn = bnx2i_get_conn_from_id(hba, iscsi_cid); 1957 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1956 1958
1957 if (!conn) { 1959 if (!bnx2i_conn) {
1958 printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid); 1960 printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
1959 return; 1961 return;
1960 } 1962 }
1961 if (!conn->ep) { 1963 if (!bnx2i_conn->ep) {
1962 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); 1964 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
1963 return; 1965 return;
1964 } 1966 }
1965 1967 bnx2i_process_new_cqes(bnx2i_conn);
1966 bnx2i_process_new_cqes(conn); 1968 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
1969 bnx2i_process_new_cqes(bnx2i_conn);
1967} 1970}
1968 1971
1969 1972
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1d24a2819736..6adbdc34a9a5 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
244 wait_event_interruptible_timeout(hba->eh_wait, 244 wait_event_interruptible_timeout(hba->eh_wait,
245 (list_empty(&hba->ep_ofld_list) && 245 (list_empty(&hba->ep_ofld_list) &&
246 list_empty(&hba->ep_destroy_list)), 246 list_empty(&hba->ep_destroy_list)),
247 10 * HZ); 247 2 * HZ);
248 /* Wait for all endpoints to be torn down, Chip will be reset once 248 /* Wait for all endpoints to be torn down, Chip will be reset once
249 * control returns to network driver. So it is required to cleanup and 249 * control returns to network driver. So it is required to cleanup and
250 * release all connection resources before returning from this routine. 250 * release all connection resources before returning from this routine.
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1809f9ccc4ce..041928b23cb0 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -379,6 +379,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
379{ 379{
380 struct iscsi_endpoint *ep; 380 struct iscsi_endpoint *ep;
381 struct bnx2i_endpoint *bnx2i_ep; 381 struct bnx2i_endpoint *bnx2i_ep;
382 u32 ec_div;
382 383
383 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); 384 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
384 if (!ep) { 385 if (!ep) {
@@ -393,6 +394,11 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
393 bnx2i_ep->ep_iscsi_cid = (u16) -1; 394 bnx2i_ep->ep_iscsi_cid = (u16) -1;
394 bnx2i_ep->hba = hba; 395 bnx2i_ep->hba = hba;
395 bnx2i_ep->hba_age = hba->age; 396 bnx2i_ep->hba_age = hba->age;
397
398 ec_div = event_coal_div;
399 while (ec_div >>= 1)
400 bnx2i_ep->ec_shift += 1;
401
396 hba->ofld_conns_active++; 402 hba->ofld_conns_active++;
397 init_waitqueue_head(&bnx2i_ep->ofld_wait); 403 init_waitqueue_head(&bnx2i_ep->ofld_wait);
398 return ep; 404 return ep;
@@ -858,7 +864,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
858 mutex_init(&hba->net_dev_lock); 864 mutex_init(&hba->net_dev_lock);
859 init_waitqueue_head(&hba->eh_wait); 865 init_waitqueue_head(&hba->eh_wait);
860 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { 866 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
861 hba->hba_shutdown_tmo = 20 * HZ; 867 hba->hba_shutdown_tmo = 30 * HZ;
862 hba->conn_teardown_tmo = 20 * HZ; 868 hba->conn_teardown_tmo = 20 * HZ;
863 hba->conn_ctx_destroy_tmo = 6 * HZ; 869 hba->conn_ctx_destroy_tmo = 6 * HZ;
864 } else { /* 5706/5708/5709 */ 870 } else { /* 5706/5708/5709 */
@@ -1208,6 +1214,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
1208 struct bnx2i_cmd *cmd = task->dd_data; 1214 struct bnx2i_cmd *cmd = task->dd_data;
1209 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; 1215 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
1210 1216
1217 if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
1218 return -ENOMEM;
1219
1211 /* 1220 /*
1212 * If there is no scsi_cmnd this must be a mgmt task 1221 * If there is no scsi_cmnd this must be a mgmt task
1213 */ 1222 */
@@ -2156,7 +2165,7 @@ static struct scsi_host_template bnx2i_host_template = {
2156 .change_queue_depth = iscsi_change_queue_depth, 2165 .change_queue_depth = iscsi_change_queue_depth,
2157 .can_queue = 1024, 2166 .can_queue = 1024,
2158 .max_sectors = 127, 2167 .max_sectors = 127,
2159 .cmd_per_lun = 32, 2168 .cmd_per_lun = 24,
2160 .this_id = -1, 2169 .this_id = -1,
2161 .use_clustering = ENABLE_CLUSTERING, 2170 .use_clustering = ENABLE_CLUSTERING,
2162 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, 2171 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
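
The bnx2i changes above replace the per-CQE division by event_coal_div with a right shift by ep->ec_shift, computed once in bnx2i_alloc_ep(). The sketch below reproduces both calculations with hypothetical parameter values (event_coal_min = 24, event_coal_div = 2, 100 active commands) to show that the shift matches the old division when the divisor is a power of two.

/* Worked example of the event-coalescing arithmetic; values are hypothetical. */
#include <stdio.h>

int main(void)
{
	unsigned int event_coal_min = 24, event_coal_div = 2;
	unsigned int num_active_cmds = 100;
	unsigned int ec_shift = 0, ec_div = event_coal_div;
	unsigned int next_index;

	/* same loop as bnx2i_alloc_ep(): ec_shift = log2(event_coal_div) */
	while (ec_div >>= 1)
		ec_shift += 1;

	next_index = event_coal_min +
		     ((num_active_cmds - event_coal_min) >> ec_shift);

	/* 24 + (100 - 24) / 2 = 62 */
	printf("ec_shift=%u next_index=%u\n", ec_shift, next_index);
	return 0;
}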
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cc23bd9480b2..155d7b9bdeae 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -137,6 +137,7 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
137static int fcoe_vport_disable(struct fc_vport *, bool disable); 137static int fcoe_vport_disable(struct fc_vport *, bool disable);
138static void fcoe_set_vport_symbolic_name(struct fc_vport *); 138static void fcoe_set_vport_symbolic_name(struct fc_vport *);
139static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); 139static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
140static int fcoe_validate_vport_create(struct fc_vport *);
140 141
141static struct libfc_function_template fcoe_libfc_fcn_templ = { 142static struct libfc_function_template fcoe_libfc_fcn_templ = {
142 .frame_send = fcoe_xmit, 143 .frame_send = fcoe_xmit,
@@ -2351,6 +2352,17 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
2351 struct fcoe_interface *fcoe = port->priv; 2352 struct fcoe_interface *fcoe = port->priv;
2352 struct net_device *netdev = fcoe->netdev; 2353 struct net_device *netdev = fcoe->netdev;
2353 struct fc_lport *vn_port; 2354 struct fc_lport *vn_port;
2355 int rc;
2356 char buf[32];
2357
2358 rc = fcoe_validate_vport_create(vport);
2359 if (rc) {
2360 wwn_to_str(vport->port_name, buf, sizeof(buf));
2361 printk(KERN_ERR "fcoe: Failed to create vport, "
2362 "WWPN (0x%s) already exists\n",
2363 buf);
2364 return rc;
2365 }
2354 2366
2355 mutex_lock(&fcoe_config_mutex); 2367 mutex_lock(&fcoe_config_mutex);
2356 vn_port = fcoe_if_create(fcoe, &vport->dev, 1); 2368 vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
@@ -2497,3 +2509,49 @@ static void fcoe_set_port_id(struct fc_lport *lport,
2497 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) 2509 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
2498 fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp); 2510 fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
2499} 2511}
2512
2513/**
2514 * fcoe_validate_vport_create() - Validate a vport before creating it
2515 * @vport: NPIV port to be created
2516 *
2517 * This routine is meant to add validation for a vport before creating it
2518 * via fcoe_vport_create().
2519 * Current validations are:
2520 * - WWPN supplied is unique for given lport
2521 *
2522 *
2523*/
2524static int fcoe_validate_vport_create(struct fc_vport *vport)
2525{
2526 struct Scsi_Host *shost = vport_to_shost(vport);
2527 struct fc_lport *n_port = shost_priv(shost);
2528 struct fc_lport *vn_port;
2529 int rc = 0;
2530 char buf[32];
2531
2532 mutex_lock(&n_port->lp_mutex);
2533
2534 wwn_to_str(vport->port_name, buf, sizeof(buf));
2535 /* Check if the wwpn is not same as that of the lport */
2536 if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
2537 FCOE_DBG("vport WWPN 0x%s is same as that of the "
2538 "base port WWPN\n", buf);
2539 rc = -EINVAL;
2540 goto out;
2541 }
2542
2543 /* Check if there is any existing vport with same wwpn */
2544 list_for_each_entry(vn_port, &n_port->vports, list) {
2545 if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
2546 FCOE_DBG("vport with given WWPN 0x%s already "
2547 "exists\n", buf);
2548 rc = -EINVAL;
2549 break;
2550 }
2551 }
2552
2553out:
2554 mutex_unlock(&n_port->lp_mutex);
2555
2556 return rc;
2557}
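
Aside (not part of the patch): fcoe_validate_vport_create() above refuses an NPIV create whose WWPN matches the base lport or any existing vport, taking lp_mutex around the list walk. A standalone sketch of the same duplicate-WWPN check over a plain array; names are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    /* Reject a candidate WWPN that collides with the base port or any existing
     * vport; a plain array stands in for the lport's vport list. */
    static int validate_new_wwpn(uint64_t base_wwpn, const uint64_t *vport_wwpns,
                                 size_t nr_vports, uint64_t candidate)
    {
        size_t i;

        if (candidate == base_wwpn)
            return -1;                      /* same as the physical port */
        for (i = 0; i < nr_vports; i++)
            if (vport_wwpns[i] == candidate)
                return -1;                  /* already used by a vport */
        return 0;
    }

    int main(void)
    {
        uint64_t vports[] = { 0x200000109b00c0deULL };

        printf("%d\n", validate_new_wwpn(0x100000109b00c0deULL, vports, 1,
                                         0x200000109b00c0deULL));  /* duplicate */
        printf("%d\n", validate_new_wwpn(0x100000109b00c0deULL, vports, 1,
                                         0x200000109b00beefULL));  /* unique */
        return 0;
    }
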
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 408a6fd78fb4..c4a93993c0cf 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -99,4 +99,14 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
99 ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; 99 ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
100} 100}
101 101
102static inline void wwn_to_str(u64 wwn, char *buf, int len)
103{
104 u8 wwpn[8];
105
106 u64_to_wwn(wwn, wwpn);
107 snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
108 wwpn[0], wwpn[1], wwpn[2], wwpn[3],
109 wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
110}
111
102#endif /* _FCOE_H_ */ 112#endif /* _FCOE_H_ */
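
Aside (not part of the patch): the new wwn_to_str() helper formats a 64-bit WWN as 16 hex digits for log messages. A userspace equivalent that produces the same digit order with a single format specifier, just to show the expected output; it is a sketch, not the kernel helper.

    #include <stdint.h>
    #include <stdio.h>

    /* Format a 64-bit WWN as 16 lowercase hex digits, most significant byte
     * first -- the same digit order the helper above produces byte by byte. */
    static void wwn_to_str(uint64_t wwn, char *buf, size_t len)
    {
        snprintf(buf, len, "%016llx", (unsigned long long)wwn);
    }

    int main(void)
    {
        char buf[32];

        wwn_to_str(0x100000109b34c0deULL, buf, sizeof(buf));
        printf("WWPN 0x%s\n", buf);    /* WWPN 0x100000109b34c0de */
        return 0;
    }
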
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 229e4af5508a..c74c4b8e71ef 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1173,7 +1173,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
1173 struct fc_lport *lport = fip->lp; 1173 struct fc_lport *lport = fip->lp;
1174 struct fc_lport *vn_port = NULL; 1174 struct fc_lport *vn_port = NULL;
1175 u32 desc_mask; 1175 u32 desc_mask;
1176 int is_vn_port = 0; 1176 int num_vlink_desc;
1177 int reset_phys_port = 0;
1178 struct fip_vn_desc **vlink_desc_arr = NULL;
1177 1179
1178 LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); 1180 LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
1179 1181
@@ -1183,70 +1185,73 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
1183 /* 1185 /*
1184 * mask of required descriptors. Validating each one clears its bit. 1186 * mask of required descriptors. Validating each one clears its bit.
1185 */ 1187 */
1186 desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID); 1188 desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
1187 1189
1188 rlen = ntohs(fh->fip_dl_len) * FIP_BPW; 1190 rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
1189 desc = (struct fip_desc *)(fh + 1); 1191 desc = (struct fip_desc *)(fh + 1);
1192
1193 /*
1194 * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen'
1195 * before determining max Vx_Port descriptor but a buggy FCF could have
 1196 * omitted either or both MAC Address and Name Identifier descriptors
1197 */
1198 num_vlink_desc = rlen / sizeof(*vp);
1199 if (num_vlink_desc)
1200 vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
1201 GFP_ATOMIC);
1202 if (!vlink_desc_arr)
1203 return;
1204 num_vlink_desc = 0;
1205
1190 while (rlen >= sizeof(*desc)) { 1206 while (rlen >= sizeof(*desc)) {
1191 dlen = desc->fip_dlen * FIP_BPW; 1207 dlen = desc->fip_dlen * FIP_BPW;
1192 if (dlen > rlen) 1208 if (dlen > rlen)
1193 return; 1209 goto err;
1194 /* Drop CVL if there are duplicate critical descriptors */ 1210 /* Drop CVL if there are duplicate critical descriptors */
1195 if ((desc->fip_dtype < 32) && 1211 if ((desc->fip_dtype < 32) &&
1212 (desc->fip_dtype != FIP_DT_VN_ID) &&
1196 !(desc_mask & 1U << desc->fip_dtype)) { 1213 !(desc_mask & 1U << desc->fip_dtype)) {
1197 LIBFCOE_FIP_DBG(fip, "Duplicate Critical " 1214 LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
1198 "Descriptors in FIP CVL\n"); 1215 "Descriptors in FIP CVL\n");
1199 return; 1216 goto err;
1200 } 1217 }
1201 switch (desc->fip_dtype) { 1218 switch (desc->fip_dtype) {
1202 case FIP_DT_MAC: 1219 case FIP_DT_MAC:
1203 mp = (struct fip_mac_desc *)desc; 1220 mp = (struct fip_mac_desc *)desc;
1204 if (dlen < sizeof(*mp)) 1221 if (dlen < sizeof(*mp))
1205 return; 1222 goto err;
1206 if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac)) 1223 if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
1207 return; 1224 goto err;
1208 desc_mask &= ~BIT(FIP_DT_MAC); 1225 desc_mask &= ~BIT(FIP_DT_MAC);
1209 break; 1226 break;
1210 case FIP_DT_NAME: 1227 case FIP_DT_NAME:
1211 wp = (struct fip_wwn_desc *)desc; 1228 wp = (struct fip_wwn_desc *)desc;
1212 if (dlen < sizeof(*wp)) 1229 if (dlen < sizeof(*wp))
1213 return; 1230 goto err;
1214 if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name) 1231 if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
1215 return; 1232 goto err;
1216 desc_mask &= ~BIT(FIP_DT_NAME); 1233 desc_mask &= ~BIT(FIP_DT_NAME);
1217 break; 1234 break;
1218 case FIP_DT_VN_ID: 1235 case FIP_DT_VN_ID:
1219 vp = (struct fip_vn_desc *)desc; 1236 vp = (struct fip_vn_desc *)desc;
1220 if (dlen < sizeof(*vp)) 1237 if (dlen < sizeof(*vp))
1221 return; 1238 goto err;
1222 if (compare_ether_addr(vp->fd_mac, 1239 vlink_desc_arr[num_vlink_desc++] = vp;
1223 fip->get_src_addr(lport)) == 0 && 1240 vn_port = fc_vport_id_lookup(lport,
1224 get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn && 1241 ntoh24(vp->fd_fc_id));
1225 ntoh24(vp->fd_fc_id) == lport->port_id) { 1242 if (vn_port && (vn_port == lport)) {
1226 desc_mask &= ~BIT(FIP_DT_VN_ID); 1243 mutex_lock(&fip->ctlr_mutex);
1227 break; 1244 per_cpu_ptr(lport->dev_stats,
1245 get_cpu())->VLinkFailureCount++;
1246 put_cpu();
1247 fcoe_ctlr_reset(fip);
1248 mutex_unlock(&fip->ctlr_mutex);
1228 } 1249 }
1229 /* check if clr_vlink is for NPIV port */
1230 mutex_lock(&lport->lp_mutex);
1231 list_for_each_entry(vn_port, &lport->vports, list) {
1232 if (compare_ether_addr(vp->fd_mac,
1233 fip->get_src_addr(vn_port)) == 0 &&
1234 (get_unaligned_be64(&vp->fd_wwpn)
1235 == vn_port->wwpn) &&
1236 (ntoh24(vp->fd_fc_id) ==
1237 fc_host_port_id(vn_port->host))) {
1238 desc_mask &= ~BIT(FIP_DT_VN_ID);
1239 is_vn_port = 1;
1240 break;
1241 }
1242 }
1243 mutex_unlock(&lport->lp_mutex);
1244
1245 break; 1250 break;
1246 default: 1251 default:
1247 /* standard says ignore unknown descriptors >= 128 */ 1252 /* standard says ignore unknown descriptors >= 128 */
1248 if (desc->fip_dtype < FIP_DT_VENDOR_BASE) 1253 if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
1249 return; 1254 goto err;
1250 break; 1255 break;
1251 } 1256 }
1252 desc = (struct fip_desc *)((char *)desc + dlen); 1257 desc = (struct fip_desc *)((char *)desc + dlen);
@@ -1256,26 +1261,68 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
1256 /* 1261 /*
1257 * reset only if all required descriptors were present and valid. 1262 * reset only if all required descriptors were present and valid.
1258 */ 1263 */
1259 if (desc_mask) { 1264 if (desc_mask)
1260 LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n", 1265 LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
1261 desc_mask); 1266 desc_mask);
1267 else if (!num_vlink_desc) {
1268 LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
1269 /*
1270 * No Vx_Port description. Clear all NPIV ports,
1271 * followed by physical port
1272 */
1273 mutex_lock(&lport->lp_mutex);
1274 list_for_each_entry(vn_port, &lport->vports, list)
1275 fc_lport_reset(vn_port);
1276 mutex_unlock(&lport->lp_mutex);
1277
1278 mutex_lock(&fip->ctlr_mutex);
1279 per_cpu_ptr(lport->dev_stats,
1280 get_cpu())->VLinkFailureCount++;
1281 put_cpu();
1282 fcoe_ctlr_reset(fip);
1283 mutex_unlock(&fip->ctlr_mutex);
1284
1285 fc_lport_reset(fip->lp);
1286 fcoe_ctlr_solicit(fip, NULL);
1262 } else { 1287 } else {
1263 LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); 1288 int i;
1264 1289
1265 if (is_vn_port) 1290 LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
1266 fc_lport_reset(vn_port); 1291 for (i = 0; i < num_vlink_desc; i++) {
1267 else { 1292 vp = vlink_desc_arr[i];
1268 mutex_lock(&fip->ctlr_mutex); 1293 vn_port = fc_vport_id_lookup(lport,
1269 per_cpu_ptr(lport->dev_stats, 1294 ntoh24(vp->fd_fc_id));
1270 get_cpu())->VLinkFailureCount++; 1295 if (!vn_port)
1271 put_cpu(); 1296 continue;
1272 fcoe_ctlr_reset(fip); 1297
1273 mutex_unlock(&fip->ctlr_mutex); 1298 /*
1299 * 'port_id' is already validated, check MAC address and
1300 * wwpn
1301 */
1302 if (compare_ether_addr(fip->get_src_addr(vn_port),
1303 vp->fd_mac) != 0 ||
1304 get_unaligned_be64(&vp->fd_wwpn) !=
1305 vn_port->wwpn)
1306 continue;
1307
1308 if (vn_port == lport)
1309 /*
1310 * Physical port, defer processing till all
1311 * listed NPIV ports are cleared
1312 */
1313 reset_phys_port = 1;
1314 else /* NPIV port */
1315 fc_lport_reset(vn_port);
1316 }
1274 1317
1318 if (reset_phys_port) {
1275 fc_lport_reset(fip->lp); 1319 fc_lport_reset(fip->lp);
1276 fcoe_ctlr_solicit(fip, NULL); 1320 fcoe_ctlr_solicit(fip, NULL);
1277 } 1321 }
1278 } 1322 }
1323
1324err:
1325 kfree(vlink_desc_arr);
1279} 1326}
1280 1327
1281/** 1328/**
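
Aside (not part of the patch): the reworked fcoe_ctlr_recv_clr_vlink() first collects the Vx_Port descriptors into a temporary array while validating the MAC and switch-name descriptors, then resets the matching NPIV ports and defers the physical-port reset to the end (or, with no Vx_Port descriptor at all, resets every port). A minimal sketch of that collect-then-apply shape with made-up types.

    #include <stdio.h>
    #include <stdlib.h>

    struct vdesc { int port_id; };    /* invented stand-in for a Vx_Port descriptor */

    /* Pass 1: collect descriptors into a scratch array; pass 2: act on them,
     * deferring the "physical" entry (id 0 here) until the others are done. */
    static void handle_clear_virtual_link(const struct vdesc *in, size_t n)
    {
        struct vdesc *matched;
        size_t nmatched = 0, i;
        int reset_phys = 0;

        matched = malloc(n * sizeof(*matched));
        if (!matched)
            return;

        for (i = 0; i < n; i++)           /* pass 1: gather/validate */
            matched[nmatched++] = in[i];

        for (i = 0; i < nmatched; i++) {  /* pass 2: apply */
            if (matched[i].port_id == 0)
                reset_phys = 1;           /* defer the physical port */
            else
                printf("reset NPIV port %d\n", matched[i].port_id);
        }
        if (reset_phys)
            printf("reset physical port\n");

        free(matched);
    }

    int main(void)
    {
        struct vdesc descs[] = { { 7 }, { 0 }, { 9 } };

        handle_clear_virtual_link(descs, 3);
        return 0;
    }
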
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index f81f77c8569e..41068e8748e7 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -544,16 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
544 struct fcoe_transport *ft = NULL; 544 struct fcoe_transport *ft = NULL;
545 enum fip_state fip_mode = (enum fip_state)(long)kp->arg; 545 enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
546 546
547#ifdef CONFIG_LIBFCOE_MODULE
548 /*
549 * Make sure the module has been initialized, and is not about to be
550 * removed. Module parameter sysfs files are writable before the
551 * module_init function is called and after module_exit.
552 */
553 if (THIS_MODULE->state != MODULE_STATE_LIVE)
554 goto out_nodev;
555#endif
556
557 mutex_lock(&ft_mutex); 547 mutex_lock(&ft_mutex);
558 548
559 netdev = fcoe_if_to_netdev(buffer); 549 netdev = fcoe_if_to_netdev(buffer);
@@ -618,16 +608,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
618 struct net_device *netdev = NULL; 608 struct net_device *netdev = NULL;
619 struct fcoe_transport *ft = NULL; 609 struct fcoe_transport *ft = NULL;
620 610
621#ifdef CONFIG_LIBFCOE_MODULE
622 /*
623 * Make sure the module has been initialized, and is not about to be
624 * removed. Module parameter sysfs files are writable before the
625 * module_init function is called and after module_exit.
626 */
627 if (THIS_MODULE->state != MODULE_STATE_LIVE)
628 goto out_nodev;
629#endif
630
631 mutex_lock(&ft_mutex); 611 mutex_lock(&ft_mutex);
632 612
633 netdev = fcoe_if_to_netdev(buffer); 613 netdev = fcoe_if_to_netdev(buffer);
@@ -672,16 +652,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
672 struct net_device *netdev = NULL; 652 struct net_device *netdev = NULL;
673 struct fcoe_transport *ft = NULL; 653 struct fcoe_transport *ft = NULL;
674 654
675#ifdef CONFIG_LIBFCOE_MODULE
676 /*
677 * Make sure the module has been initialized, and is not about to be
678 * removed. Module parameter sysfs files are writable before the
679 * module_init function is called and after module_exit.
680 */
681 if (THIS_MODULE->state != MODULE_STATE_LIVE)
682 goto out_nodev;
683#endif
684
685 mutex_lock(&ft_mutex); 655 mutex_lock(&ft_mutex);
686 656
687 netdev = fcoe_if_to_netdev(buffer); 657 netdev = fcoe_if_to_netdev(buffer);
@@ -720,16 +690,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
720 struct net_device *netdev = NULL; 690 struct net_device *netdev = NULL;
721 struct fcoe_transport *ft = NULL; 691 struct fcoe_transport *ft = NULL;
722 692
723#ifdef CONFIG_LIBFCOE_MODULE
724 /*
725 * Make sure the module has been initialized, and is not about to be
726 * removed. Module parameter sysfs files are writable before the
727 * module_init function is called and after module_exit.
728 */
729 if (THIS_MODULE->state != MODULE_STATE_LIVE)
730 goto out_nodev;
731#endif
732
733 mutex_lock(&ft_mutex); 693 mutex_lock(&ft_mutex);
734 694
735 netdev = fcoe_if_to_netdev(buffer); 695 netdev = fcoe_if_to_netdev(buffer);
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 92109b126391..112f1bec7756 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2227,7 +2227,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start,
2227 bp = buf; 2227 bp = buf;
2228 *bp = '\0'; 2228 *bp = '\0';
2229 if (hd->proc & PR_VERSION) { 2229 if (hd->proc & PR_VERSION) {
2230 sprintf(tbuf, "\nVersion %s - %s. Compiled %s %s", IN2000_VERSION, IN2000_DATE, __DATE__, __TIME__); 2230 sprintf(tbuf, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
2231 strcat(bp, tbuf); 2231 strcat(bp, tbuf);
2232 } 2232 }
2233 if (hd->proc & PR_INFO) { 2233 if (hd->proc & PR_INFO) {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 12868ca46110..888086c4e709 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5149,21 +5149,21 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5149 5149
5150 if (ipr_cmd != NULL) { 5150 if (ipr_cmd != NULL) {
5151 /* Clear the PCI interrupt */ 5151 /* Clear the PCI interrupt */
5152 num_hrrq = 0;
5152 do { 5153 do {
5153 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5154 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5154 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5155 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5155 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 5156 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5156 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 5157 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5157 5158
5158 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
5159 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5160 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5161 return IRQ_HANDLED;
5162 }
5163
5164 } else if (rc == IRQ_NONE && irq_none == 0) { 5159 } else if (rc == IRQ_NONE && irq_none == 0) {
5165 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5160 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5166 irq_none++; 5161 irq_none++;
5162 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5163 int_reg & IPR_PCII_HRRQ_UPDATED) {
5164 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5165 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5166 return IRQ_HANDLED;
5167 } else 5167 } else
5168 break; 5168 break;
5169 } 5169 }
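
Aside (not part of the patch): the ipr hunk initializes num_hrrq right before the clear-interrupt retry loop and moves the retries-exhausted report into the caller's else-if chain, so it only fires when the HRRQ-updated bit is still set after the bounded retries. A standalone sketch of a bounded retry with an explicit exhausted check; the simulated status functions are invented for the example.

    #include <stdio.h>

    #define MAX_RETRIES 10

    /* Simulated status bit that needs a few clear attempts before it drops. */
    static int hw_pending = 3;

    static int  read_status(void) { return hw_pending > 0; }
    static void write_clear(void) { if (hw_pending > 0) hw_pending--; }

    int main(void)
    {
        int tries = 0;    /* reset right before the loop, as the hunk above does */
        int status;

        do {
            write_clear();
            status = read_status();
            tries++;
        } while (status && tries < MAX_RETRIES);

        if (status)
            fprintf(stderr, "error: status still set after %d attempts\n", tries);
        else
            printf("status cleared after %d attempts\n", tries);
        return 0;
    }
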
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 911b2736cafa..b9cb8140b398 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -205,6 +205,7 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
205 default: 205 default:
206 FC_DISC_DBG(disc, "Received an unsupported request, " 206 FC_DISC_DBG(disc, "Received an unsupported request, "
207 "the opcode is (%x)\n", op); 207 "the opcode is (%x)\n", op);
208 fc_frame_free(fp);
208 break; 209 break;
209 } 210 }
210} 211}
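
Aside (not part of the patch): the fc_disc change frees the received frame in the default (unsupported opcode) branch, closing a leak. A tiny sketch of releasing the buffer on every switch path; the types are invented.

    #include <stdio.h>
    #include <stdlib.h>

    struct frame { int op; };    /* invented stand-in for a received frame */

    /* Every branch of the dispatch, including the unsupported-opcode default,
     * must release the frame or it leaks -- the point of the one-line fix above. */
    static void handle_request(struct frame *fp)
    {
        switch (fp->op) {
        case 1:
            printf("handled op 1\n");
            free(fp);               /* the handler consumes the frame */
            break;
        default:
            printf("unsupported op %d\n", fp->op);
            free(fp);               /* still ours to free */
            break;
        }
    }

    int main(void)
    {
        struct frame *fp = malloc(sizeof(*fp));

        if (!fp)
            return 1;
        fp->op = 42;
        handle_request(fp);
        return 0;
    }
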
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 77035a746f60..3b8a6451ea28 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1434,6 +1434,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1434 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == 1434 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1435 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { 1435 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1436 spin_lock_bh(&ep->ex_lock); 1436 spin_lock_bh(&ep->ex_lock);
1437 resp = ep->resp;
1437 rc = fc_exch_done_locked(ep); 1438 rc = fc_exch_done_locked(ep);
1438 WARN_ON(fc_seq_exch(sp) != ep); 1439 WARN_ON(fc_seq_exch(sp) != ep);
1439 spin_unlock_bh(&ep->ex_lock); 1440 spin_unlock_bh(&ep->ex_lock);
@@ -1978,6 +1979,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1978 spin_unlock_bh(&ep->ex_lock); 1979 spin_unlock_bh(&ep->ex_lock);
1979 return sp; 1980 return sp;
1980err: 1981err:
1982 fc_fcp_ddp_done(fr_fsp(fp));
1981 rc = fc_exch_done_locked(ep); 1983 rc = fc_exch_done_locked(ep);
1982 spin_unlock_bh(&ep->ex_lock); 1984 spin_unlock_bh(&ep->ex_lock);
1983 if (!rc) 1985 if (!rc)
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2a3a4720a771..9cd2149519ac 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -312,7 +312,7 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
312 * DDP related resources for a fcp_pkt 312 * DDP related resources for a fcp_pkt
313 * @fsp: The FCP packet that DDP had been used on 313 * @fsp: The FCP packet that DDP had been used on
314 */ 314 */
315static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) 315void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
316{ 316{
317 struct fc_lport *lport; 317 struct fc_lport *lport;
318 318
@@ -681,8 +681,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
681 error = lport->tt.seq_send(lport, seq, fp); 681 error = lport->tt.seq_send(lport, seq, fp);
682 if (error) { 682 if (error) {
683 WARN_ON(1); /* send error should be rare */ 683 WARN_ON(1); /* send error should be rare */
684 fc_fcp_retry_cmd(fsp); 684 return error;
685 return 0;
686 } 685 }
687 fp = NULL; 686 fp = NULL;
688 } 687 }
@@ -1673,7 +1672,8 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1673 FC_FCTL_REQ, 0); 1672 FC_FCTL_REQ, 0);
1674 1673
1675 rec_tov = get_fsp_rec_tov(fsp); 1674 rec_tov = get_fsp_rec_tov(fsp);
1676 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, 1675 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
1676 fc_fcp_pkt_destroy,
1677 fsp, jiffies_to_msecs(rec_tov)); 1677 fsp, jiffies_to_msecs(rec_tov));
1678 if (!seq) 1678 if (!seq)
1679 goto retry; 1679 goto retry;
@@ -1720,7 +1720,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1720 return; 1720 return;
1721 } 1721 }
1722 1722
1723 fsp->recov_seq = NULL;
1724 switch (fc_frame_payload_op(fp)) { 1723 switch (fc_frame_payload_op(fp)) {
1725 case ELS_LS_ACC: 1724 case ELS_LS_ACC:
1726 fsp->recov_retry = 0; 1725 fsp->recov_retry = 0;
@@ -1732,10 +1731,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1732 break; 1731 break;
1733 } 1732 }
1734 fc_fcp_unlock_pkt(fsp); 1733 fc_fcp_unlock_pkt(fsp);
1735 fsp->lp->tt.exch_done(seq);
1736out: 1734out:
1735 fsp->lp->tt.exch_done(seq);
1737 fc_frame_free(fp); 1736 fc_frame_free(fp);
1738 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1739} 1737}
1740 1738
1741/** 1739/**
@@ -1747,8 +1745,6 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1747{ 1745{
1748 if (fc_fcp_lock_pkt(fsp)) 1746 if (fc_fcp_lock_pkt(fsp))
1749 goto out; 1747 goto out;
1750 fsp->lp->tt.exch_done(fsp->recov_seq);
1751 fsp->recov_seq = NULL;
1752 switch (PTR_ERR(fp)) { 1748 switch (PTR_ERR(fp)) {
1753 case -FC_EX_TIMEOUT: 1749 case -FC_EX_TIMEOUT:
1754 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1750 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
@@ -1764,7 +1760,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1764 } 1760 }
1765 fc_fcp_unlock_pkt(fsp); 1761 fc_fcp_unlock_pkt(fsp);
1766out: 1762out:
1767 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */ 1763 fsp->lp->tt.exch_done(fsp->recov_seq);
1768} 1764}
1769 1765
1770/** 1766/**
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index fedc819d70c0..c7d071289af5 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -108,6 +108,7 @@ extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
108 * Set up direct-data placement for this I/O request 108 * Set up direct-data placement for this I/O request
109 */ 109 */
110void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid); 110void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
111void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
111 112
112/* 113/*
113 * Module setup functions 114 * Module setup functions
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 31fc21f4d831..db9238f2ecb8 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -99,19 +99,29 @@ static void sas_ata_task_done(struct sas_task *task)
99 struct sas_ha_struct *sas_ha; 99 struct sas_ha_struct *sas_ha;
100 enum ata_completion_errors ac; 100 enum ata_completion_errors ac;
101 unsigned long flags; 101 unsigned long flags;
102 struct ata_link *link;
102 103
103 if (!qc) 104 if (!qc)
104 goto qc_already_gone; 105 goto qc_already_gone;
105 106
106 dev = qc->ap->private_data; 107 dev = qc->ap->private_data;
107 sas_ha = dev->port->ha; 108 sas_ha = dev->port->ha;
109 link = &dev->sata_dev.ap->link;
108 110
109 spin_lock_irqsave(dev->sata_dev.ap->lock, flags); 111 spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
110 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD || 112 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
111 ((stat->stat == SAM_STAT_CHECK_CONDITION && 113 ((stat->stat == SAM_STAT_CHECK_CONDITION &&
112 dev->sata_dev.command_set == ATAPI_COMMAND_SET))) { 114 dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
113 ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf); 115 ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
114 qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command); 116
117 if (!link->sactive) {
118 qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
119 } else {
120 link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
121 if (unlikely(link->eh_info.err_mask))
122 qc->flags |= ATA_QCFLAG_FAILED;
123 }
124
115 dev->sata_dev.sstatus = resp->sstatus; 125 dev->sata_dev.sstatus = resp->sstatus;
116 dev->sata_dev.serror = resp->serror; 126 dev->sata_dev.serror = resp->serror;
117 dev->sata_dev.scontrol = resp->scontrol; 127 dev->sata_dev.scontrol = resp->scontrol;
@@ -121,7 +131,13 @@ static void sas_ata_task_done(struct sas_task *task)
121 SAS_DPRINTK("%s: SAS error %x\n", __func__, 131 SAS_DPRINTK("%s: SAS error %x\n", __func__,
122 stat->stat); 132 stat->stat);
123 /* We saw a SAS error. Send a vague error. */ 133 /* We saw a SAS error. Send a vague error. */
124 qc->err_mask = ac; 134 if (!link->sactive) {
135 qc->err_mask = ac;
136 } else {
137 link->eh_info.err_mask |= AC_ERR_DEV;
138 qc->flags |= ATA_QCFLAG_FAILED;
139 }
140
125 dev->sata_dev.tf.feature = 0x04; /* status err */ 141 dev->sata_dev.tf.feature = 0x04; /* status err */
126 dev->sata_dev.tf.command = ATA_ERR; 142 dev->sata_dev.tf.command = ATA_ERR;
127 } 143 }
@@ -279,6 +295,44 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
279 return ret; 295 return ret;
280} 296}
281 297
298static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
299 unsigned long deadline)
300{
301 struct ata_port *ap = link->ap;
302 struct domain_device *dev = ap->private_data;
303 struct sas_internal *i =
304 to_sas_internal(dev->port->ha->core.shost->transportt);
305 int res = TMF_RESP_FUNC_FAILED;
306 int ret = 0;
307
308 if (i->dft->lldd_ata_soft_reset)
309 res = i->dft->lldd_ata_soft_reset(dev);
310
311 if (res != TMF_RESP_FUNC_COMPLETE) {
312 SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
313 ret = -EAGAIN;
314 }
315
316 switch (dev->sata_dev.command_set) {
317 case ATA_COMMAND_SET:
318 SAS_DPRINTK("%s: Found ATA device.\n", __func__);
319 *class = ATA_DEV_ATA;
320 break;
321 case ATAPI_COMMAND_SET:
322 SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
323 *class = ATA_DEV_ATAPI;
324 break;
325 default:
326 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
327 __func__, dev->sata_dev.command_set);
328 *class = ATA_DEV_UNKNOWN;
329 break;
330 }
331
332 ap->cbl = ATA_CBL_SATA;
333 return ret;
334}
335
282static void sas_ata_post_internal(struct ata_queued_cmd *qc) 336static void sas_ata_post_internal(struct ata_queued_cmd *qc)
283{ 337{
284 if (qc->flags & ATA_QCFLAG_FAILED) 338 if (qc->flags & ATA_QCFLAG_FAILED)
@@ -309,7 +363,7 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
309 363
310static struct ata_port_operations sas_sata_ops = { 364static struct ata_port_operations sas_sata_ops = {
311 .prereset = ata_std_prereset, 365 .prereset = ata_std_prereset,
312 .softreset = NULL, 366 .softreset = sas_ata_soft_reset,
313 .hardreset = sas_ata_hard_reset, 367 .hardreset = sas_ata_hard_reset,
314 .postreset = ata_std_postreset, 368 .postreset = ata_std_postreset,
315 .error_handler = ata_std_error_handler, 369 .error_handler = ata_std_error_handler,
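
Aside (not part of the patch): the sas_ata changes report errors through link->eh_info when NCQ commands are outstanding (link->sactive) instead of only the failing qc, and add sas_ata_soft_reset(), which classifies the device from its command set after the LLDD reset. A trivial sketch of that command-set to device-class mapping; the enum values are illustrative, not libata's.

    #include <stdio.h>

    enum cmd_set   { CMD_SET_ATA = 1, CMD_SET_ATAPI = 2 };
    enum dev_class { DEV_UNKNOWN = 0, DEV_ATA, DEV_ATAPI };

    /* Map the detected command set to a device class, as the new soft-reset
     * handler does after the transport-level reset completes. */
    static enum dev_class classify(int cs)
    {
        switch (cs) {
        case CMD_SET_ATA:
            return DEV_ATA;
        case CMD_SET_ATAPI:
            return DEV_ATAPI;
        default:
            return DEV_UNKNOWN;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", classify(CMD_SET_ATA), classify(CMD_SET_ATAPI),
               classify(7));
        return 0;
    }
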
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 8b538bd1ff2b..14e21b5fb8ba 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -57,7 +57,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha);
57int sas_init_events(struct sas_ha_struct *sas_ha); 57int sas_init_events(struct sas_ha_struct *sas_ha);
58void sas_shutdown_queue(struct sas_ha_struct *sas_ha); 58void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
59 59
60void sas_deform_port(struct asd_sas_phy *phy); 60void sas_deform_port(struct asd_sas_phy *phy, int gone);
61 61
62void sas_porte_bytes_dmaed(struct work_struct *work); 62void sas_porte_bytes_dmaed(struct work_struct *work);
63void sas_porte_broadcast_rcvd(struct work_struct *work); 63void sas_porte_broadcast_rcvd(struct work_struct *work);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index b459c4b635b1..e0f5018e9071 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -39,7 +39,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
39 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, 39 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
40 &phy->phy_events_pending); 40 &phy->phy_events_pending);
41 phy->error = 0; 41 phy->error = 0;
42 sas_deform_port(phy); 42 sas_deform_port(phy, 1);
43} 43}
44 44
45static void sas_phye_oob_done(struct work_struct *work) 45static void sas_phye_oob_done(struct work_struct *work)
@@ -66,7 +66,7 @@ static void sas_phye_oob_error(struct work_struct *work)
66 sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock, 66 sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
67 &phy->phy_events_pending); 67 &phy->phy_events_pending);
68 68
69 sas_deform_port(phy); 69 sas_deform_port(phy, 1);
70 70
71 if (!port && phy->enabled && i->dft->lldd_control_phy) { 71 if (!port && phy->enabled && i->dft->lldd_control_phy) {
72 phy->error++; 72 phy->error++;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 5257fdfe699a..42fd1f25b664 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -57,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
57 57
58 if (port) { 58 if (port) {
59 if (!phy_is_wideport_member(port, phy)) 59 if (!phy_is_wideport_member(port, phy))
60 sas_deform_port(phy); 60 sas_deform_port(phy, 0);
61 else { 61 else {
62 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", 62 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
63 __func__, phy->id, phy->port->id, 63 __func__, phy->id, phy->port->id,
@@ -153,28 +153,31 @@ static void sas_form_port(struct asd_sas_phy *phy)
153 * This is called when the physical link to the other phy has been 153 * This is called when the physical link to the other phy has been
154 * lost (on this phy), in Event thread context. We cannot delay here. 154 * lost (on this phy), in Event thread context. We cannot delay here.
155 */ 155 */
156void sas_deform_port(struct asd_sas_phy *phy) 156void sas_deform_port(struct asd_sas_phy *phy, int gone)
157{ 157{
158 struct sas_ha_struct *sas_ha = phy->ha; 158 struct sas_ha_struct *sas_ha = phy->ha;
159 struct asd_sas_port *port = phy->port; 159 struct asd_sas_port *port = phy->port;
160 struct sas_internal *si = 160 struct sas_internal *si =
161 to_sas_internal(sas_ha->core.shost->transportt); 161 to_sas_internal(sas_ha->core.shost->transportt);
162 struct domain_device *dev;
162 unsigned long flags; 163 unsigned long flags;
163 164
164 if (!port) 165 if (!port)
165 return; /* done by a phy event */ 166 return; /* done by a phy event */
166 167
167 if (port->port_dev) 168 dev = port->port_dev;
168 port->port_dev->pathways--; 169 if (dev)
170 dev->pathways--;
169 171
170 if (port->num_phys == 1) { 172 if (port->num_phys == 1) {
173 if (dev && gone)
174 dev->gone = 1;
171 sas_unregister_domain_devices(port); 175 sas_unregister_domain_devices(port);
172 sas_port_delete(port->port); 176 sas_port_delete(port->port);
173 port->port = NULL; 177 port->port = NULL;
174 } else 178 } else
175 sas_port_delete_phy(port->port, phy->phy); 179 sas_port_delete_phy(port->port, phy->phy);
176 180
177
178 if (si->dft->lldd_port_deformed) 181 if (si->dft->lldd_port_deformed)
179 si->dft->lldd_port_deformed(phy); 182 si->dft->lldd_port_deformed(phy);
180 183
@@ -244,7 +247,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
244 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, 247 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
245 &phy->port_events_pending); 248 &phy->port_events_pending);
246 249
247 sas_deform_port(phy); 250 sas_deform_port(phy, 1);
248} 251}
249 252
250void sas_porte_timer_event(struct work_struct *work) 253void sas_porte_timer_event(struct work_struct *work)
@@ -256,7 +259,7 @@ void sas_porte_timer_event(struct work_struct *work)
256 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, 259 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
257 &phy->port_events_pending); 260 &phy->port_events_pending);
258 261
259 sas_deform_port(phy); 262 sas_deform_port(phy, 1);
260} 263}
261 264
262void sas_porte_hard_reset(struct work_struct *work) 265void sas_porte_hard_reset(struct work_struct *work)
@@ -268,7 +271,7 @@ void sas_porte_hard_reset(struct work_struct *work)
268 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, 271 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
269 &phy->port_events_pending); 272 &phy->port_events_pending);
270 273
271 sas_deform_port(phy); 274 sas_deform_port(phy, 1);
272} 275}
273 276
274/* ---------- SAS port registration ---------- */ 277/* ---------- SAS port registration ---------- */
@@ -306,6 +309,6 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
306 309
307 for (i = 0; i < sas_ha->num_phys; i++) 310 for (i = 0; i < sas_ha->num_phys; i++)
308 if (sas_ha->sas_phy[i]->port) 311 if (sas_ha->sas_phy[i]->port)
309 sas_deform_port(sas_ha->sas_phy[i]); 312 sas_deform_port(sas_ha->sas_phy[i], 0);
310 313
311} 314}
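
Aside (not part of the patch): sas_deform_port() now takes a gone flag, so events that mean the device truly disappeared mark port_dev->gone before unregistering it, while administrative teardown passes 0; in the sas_scsi_host.c hunk that follows, commands to a gone device fail fast with DID_BAD_TARGET. A minimal sketch of the flag-before-teardown idea with invented types.

    #include <stdio.h>

    struct device { int gone; };    /* invented stand-in for the domain device */

    /* Mark the device as vanished only when the link was really lost, so later
     * submitters can fail fast instead of issuing I/O to a dead target. */
    static void deform_port(struct device *dev, int gone)
    {
        if (gone)
            dev->gone = 1;
        /* ...unregister the device, delete the port... */
    }

    static int queue_command(struct device *dev)
    {
        if (dev->gone)
            return -1;    /* analogous to completing with DID_BAD_TARGET */
        return 0;         /* otherwise build and send the task */
    }

    int main(void)
    {
        struct device dev = { 0 };

        deform_port(&dev, 1);    /* the link really went away */
        printf("queue after loss: %d\n", queue_command(&dev));
        return 0;
    }
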
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f6e189f40917..eeba76cdf774 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -207,6 +207,13 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
207 struct sas_ha_struct *sas_ha = dev->port->ha; 207 struct sas_ha_struct *sas_ha = dev->port->ha;
208 struct sas_task *task; 208 struct sas_task *task;
209 209
210 /* If the device fell off, no sense in issuing commands */
211 if (dev->gone) {
212 cmd->result = DID_BAD_TARGET << 16;
213 scsi_done(cmd);
214 goto out;
215 }
216
210 if (dev_is_sata(dev)) { 217 if (dev_is_sata(dev)) {
211 unsigned long flags; 218 unsigned long flags;
212 219
@@ -216,13 +223,6 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
216 goto out; 223 goto out;
217 } 224 }
218 225
219 /* If the device fell off, no sense in issuing commands */
220 if (dev->gone) {
221 cmd->result = DID_BAD_TARGET << 16;
222 scsi_done(cmd);
223 goto out;
224 }
225
226 res = -ENOMEM; 226 res = -ENOMEM;
227 task = sas_create_task(cmd, dev, GFP_ATOMIC); 227 task = sas_create_task(cmd, dev, GFP_ATOMIC);
228 if (!task) 228 if (!task)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 02d53d89534f..8ec2c86a49d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -41,6 +41,7 @@ struct lpfc_sli2_slim;
41 downloads using bsg */ 41 downloads using bsg */
42#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ 42#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
43#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ 43#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
44#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
44#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ 45#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
45#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 46#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
46#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 47#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
@@ -486,6 +487,42 @@ struct unsol_rcv_ct_ctx {
486 (1 << LPFC_USER_LINK_SPEED_AUTO)) 487 (1 << LPFC_USER_LINK_SPEED_AUTO))
487#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16" 488#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
488 489
490enum nemb_type {
491 nemb_mse = 1,
492 nemb_hbd
493};
494
495enum mbox_type {
496 mbox_rd = 1,
497 mbox_wr
498};
499
500enum dma_type {
501 dma_mbox = 1,
502 dma_ebuf
503};
504
505enum sta_type {
506 sta_pre_addr = 1,
507 sta_pos_addr
508};
509
510struct lpfc_mbox_ext_buf_ctx {
511 uint32_t state;
512#define LPFC_BSG_MBOX_IDLE 0
513#define LPFC_BSG_MBOX_HOST 1
514#define LPFC_BSG_MBOX_PORT 2
515#define LPFC_BSG_MBOX_DONE 3
516#define LPFC_BSG_MBOX_ABTS 4
517 enum nemb_type nembType;
518 enum mbox_type mboxType;
519 uint32_t numBuf;
520 uint32_t mbxTag;
521 uint32_t seqNum;
522 struct lpfc_dmabuf *mbx_dmabuf;
523 struct list_head ext_dmabuf_list;
524};
525
489struct lpfc_hba { 526struct lpfc_hba {
490 /* SCSI interface function jump table entries */ 527 /* SCSI interface function jump table entries */
491 int (*lpfc_new_scsi_buf) 528 int (*lpfc_new_scsi_buf)
@@ -589,6 +626,7 @@ struct lpfc_hba {
589 626
590 MAILBOX_t *mbox; 627 MAILBOX_t *mbox;
591 uint32_t *mbox_ext; 628 uint32_t *mbox_ext;
629 struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
592 uint32_t ha_copy; 630 uint32_t ha_copy;
593 struct _PCB *pcb; 631 struct _PCB *pcb;
594 struct _IOCB *IOCBs; 632 struct _IOCB *IOCBs;
@@ -659,6 +697,7 @@ struct lpfc_hba {
659 uint32_t cfg_hostmem_hgp; 697 uint32_t cfg_hostmem_hgp;
660 uint32_t cfg_log_verbose; 698 uint32_t cfg_log_verbose;
661 uint32_t cfg_aer_support; 699 uint32_t cfg_aer_support;
700 uint32_t cfg_sriov_nr_virtfn;
662 uint32_t cfg_iocb_cnt; 701 uint32_t cfg_iocb_cnt;
663 uint32_t cfg_suppress_link_up; 702 uint32_t cfg_suppress_link_up;
664#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ 703#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
@@ -706,7 +745,6 @@ struct lpfc_hba {
706 uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */ 745 uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
707 746
708 int brd_no; /* FC board number */ 747 int brd_no; /* FC board number */
709
710 char SerialNumber[32]; /* adapter Serial Number */ 748 char SerialNumber[32]; /* adapter Serial Number */
711 char OptionROMVersion[32]; /* adapter BIOS / Fcode version */ 749 char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
712 char ModelDesc[256]; /* Model Description */ 750 char ModelDesc[256]; /* Model Description */
@@ -778,6 +816,9 @@ struct lpfc_hba {
778 uint16_t vpi_base; 816 uint16_t vpi_base;
779 uint16_t vfi_base; 817 uint16_t vfi_base;
780 unsigned long *vpi_bmask; /* vpi allocation table */ 818 unsigned long *vpi_bmask; /* vpi allocation table */
819 uint16_t *vpi_ids;
820 uint16_t vpi_count;
821 struct list_head lpfc_vpi_blk_list;
781 822
782 /* Data structure used by fabric iocb scheduler */ 823 /* Data structure used by fabric iocb scheduler */
783 struct list_head fabric_iocb_list; 824 struct list_head fabric_iocb_list;
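
Aside (not part of the patch): lpfc.h adds struct lpfc_mbox_ext_buf_ctx with a small state field (IDLE/HOST/PORT/DONE/ABTS), type enums, and a DMA-buffer list for multi-buffer mailbox passthrough. A toy sketch of gating transitions on such a state field; the transition rules below are purely illustrative, not the driver's.

    #include <stdio.h>

    enum mbox_state { MBOX_IDLE, MBOX_HOST, MBOX_PORT, MBOX_DONE, MBOX_ABTS };

    /* Allow only forward progress, plus an abort from either active state.
     * These rules are made up for the example; the driver defines only the
     * state values themselves. */
    static int transition_ok(enum mbox_state from, enum mbox_state to)
    {
        if (to == MBOX_ABTS)
            return from == MBOX_HOST || from == MBOX_PORT;
        return to == from + 1;    /* IDLE -> HOST -> PORT -> DONE */
    }

    int main(void)
    {
        printf("%d\n", transition_ok(MBOX_IDLE, MBOX_HOST));    /* 1 */
        printf("%d\n", transition_ok(MBOX_PORT, MBOX_DONE));    /* 1 */
        printf("%d\n", transition_ok(MBOX_DONE, MBOX_HOST));    /* 0 */
        printf("%d\n", transition_ok(MBOX_PORT, MBOX_ABTS));    /* 1 */
        return 0;
    }
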
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8dcbf8fff673..135a53baa735 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,73 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
755} 755}
756 756
757/** 757/**
758 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
759 * @phba: lpfc_hba pointer.
760 *
761 * Description:
762 * Request SLI4 interface type-2 device to perform a physical register set
763 * access.
764 *
765 * Returns:
766 * zero for success
767 **/
768static ssize_t
769lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
770{
771 struct completion online_compl;
772 uint32_t reg_val;
773 int status = 0;
774 int rc;
775
776 if (!phba->cfg_enable_hba_reset)
777 return -EIO;
778
779 if ((phba->sli_rev < LPFC_SLI_REV4) ||
780 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
781 LPFC_SLI_INTF_IF_TYPE_2))
782 return -EPERM;
783
784 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
785
786 if (status != 0)
787 return status;
788
789 /* wait for the device to be quiesced before firmware reset */
790 msleep(100);
791
792 reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
793 LPFC_CTL_PDEV_CTL_OFFSET);
794
795 if (opcode == LPFC_FW_DUMP)
796 reg_val |= LPFC_FW_DUMP_REQUEST;
797 else if (opcode == LPFC_FW_RESET)
798 reg_val |= LPFC_CTL_PDEV_CTL_FRST;
799 else if (opcode == LPFC_DV_RESET)
800 reg_val |= LPFC_CTL_PDEV_CTL_DRST;
801
802 writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
803 LPFC_CTL_PDEV_CTL_OFFSET);
804 /* flush */
805 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
806
807 /* delay driver action following IF_TYPE_2 reset */
808 msleep(100);
809
810 init_completion(&online_compl);
811 rc = lpfc_workq_post_event(phba, &status, &online_compl,
812 LPFC_EVT_ONLINE);
813 if (rc == 0)
814 return -ENOMEM;
815
816 wait_for_completion(&online_compl);
817
818 if (status != 0)
819 return -EIO;
820
821 return 0;
822}
823
824/**
758 * lpfc_nport_evt_cnt_show - Return the number of nport events 825 * lpfc_nport_evt_cnt_show - Return the number of nport events
759 * @dev: class device that is converted into a Scsi_host. 826 * @dev: class device that is converted into a Scsi_host.
760 * @attr: device attribute, not used. 827 * @attr: device attribute, not used.
@@ -848,6 +915,12 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
848 return -EINVAL; 915 return -EINVAL;
849 else 916 else
850 status = lpfc_do_offline(phba, LPFC_EVT_KILL); 917 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
918 else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
919 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
920 else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
921 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
922 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
923 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
851 else 924 else
852 return -EINVAL; 925 return -EINVAL;
853 926
@@ -1322,6 +1395,102 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
1322} 1395}
1323 1396
1324/** 1397/**
1398 * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
1399 * @dev: class converted to a Scsi_host structure.
1400 * @attr: device attribute, not used.
 1401 * @buf: on return contains the formatted maximum number of virtual functions.
 1402 *
 1403 * Description:
 1404 * Returns the maximum number of virtual functions a physical function can
 1405 * support; 0 will be returned if called on a virtual function.
1406 *
1407 * Returns: size of formatted string.
1408 **/
1409static ssize_t
1410lpfc_sriov_hw_max_virtfn_show(struct device *dev,
1411 struct device_attribute *attr,
1412 char *buf)
1413{
1414 struct Scsi_Host *shost = class_to_shost(dev);
1415 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1416 struct lpfc_hba *phba = vport->phba;
1417 struct pci_dev *pdev = phba->pcidev;
1418 union lpfc_sli4_cfg_shdr *shdr;
1419 uint32_t shdr_status, shdr_add_status;
1420 LPFC_MBOXQ_t *mboxq;
1421 struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
1422 struct lpfc_rsrc_desc_pcie *desc;
1423 uint32_t max_nr_virtfn;
1424 uint32_t desc_count;
1425 int length, rc, i;
1426
1427 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1428 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1429 LPFC_SLI_INTF_IF_TYPE_2))
1430 return -EPERM;
1431
1432 if (!pdev->is_physfn)
1433 return snprintf(buf, PAGE_SIZE, "%d\n", 0);
1434
1435 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1436 if (!mboxq)
1437 return -ENOMEM;
1438
1439 /* get the maximum number of virtfn support by physfn */
1440 length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
1441 sizeof(struct lpfc_sli4_cfg_mhdr));
1442 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
1443 LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
1444 length, LPFC_SLI4_MBX_EMBED);
1445 shdr = (union lpfc_sli4_cfg_shdr *)
1446 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
1447 bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
1448 phba->sli4_hba.iov.pf_number + 1);
1449
1450 get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
1451 bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
1452 LPFC_CFG_TYPE_CURRENT_ACTIVE);
1453
1454 rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
1455 lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
1456
1457 if (rc != MBX_TIMEOUT) {
1458 /* check return status */
1459 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1460 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1461 &shdr->response);
1462 if (shdr_status || shdr_add_status || rc)
1463 goto error_out;
1464
1465 } else
1466 goto error_out;
1467
1468 desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
1469
1470 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
1471 desc = (struct lpfc_rsrc_desc_pcie *)
1472 &get_prof_cfg->u.response.prof_cfg.desc[i];
1473 if (LPFC_RSRC_DESC_TYPE_PCIE ==
1474 bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
1475 max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
1476 desc);
1477 break;
1478 }
1479 }
1480
1481 if (i < LPFC_RSRC_DESC_MAX_NUM) {
1482 if (rc != MBX_TIMEOUT)
1483 mempool_free(mboxq, phba->mbox_mem_pool);
1484 return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
1485 }
1486
1487error_out:
1488 if (rc != MBX_TIMEOUT)
1489 mempool_free(mboxq, phba->mbox_mem_pool);
1490 return -EIO;
1491}
1492
1493/**
1325 * lpfc_param_show - Return a cfg attribute value in decimal 1494 * lpfc_param_show - Return a cfg attribute value in decimal
1326 * 1495 *
1327 * Description: 1496 * Description:
@@ -1762,6 +1931,8 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
1762static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL); 1931static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
1763static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL); 1932static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
1764static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL); 1933static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
1934static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
1935 lpfc_sriov_hw_max_virtfn_show, NULL);
1765 1936
1766static char *lpfc_soft_wwn_key = "C99G71SL8032A"; 1937static char *lpfc_soft_wwn_key = "C99G71SL8032A";
1767 1938
@@ -3014,7 +3185,7 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
3014 * 3185 *
3015 * @dev: class device that is converted into a Scsi_host. 3186 * @dev: class device that is converted into a Scsi_host.
3016 * @attr: device attribute, not used. 3187 * @attr: device attribute, not used.
3017 * @buf: containing the string "selective". 3188 * @buf: containing enable or disable aer flag.
3018 * @count: unused variable. 3189 * @count: unused variable.
3019 * 3190 *
3020 * Description: 3191 * Description:
@@ -3098,7 +3269,7 @@ lpfc_param_show(aer_support)
3098/** 3269/**
3099 * lpfc_aer_support_init - Set the initial adapters aer support flag 3270 * lpfc_aer_support_init - Set the initial adapters aer support flag
3100 * @phba: lpfc_hba pointer. 3271 * @phba: lpfc_hba pointer.
3101 * @val: link speed value. 3272 * @val: enable aer or disable aer flag.
3102 * 3273 *
3103 * Description: 3274 * Description:
3104 * If val is in a valid range [0,1], then set the adapter's initial 3275 * If val is in a valid range [0,1], then set the adapter's initial
@@ -3137,7 +3308,7 @@ static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
3137 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device 3308 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
3138 * @dev: class device that is converted into a Scsi_host. 3309 * @dev: class device that is converted into a Scsi_host.
3139 * @attr: device attribute, not used. 3310 * @attr: device attribute, not used.
3140 * @buf: containing the string "selective". 3311 * @buf: containing flag 1 for aer cleanup state.
3141 * @count: unused variable. 3312 * @count: unused variable.
3142 * 3313 *
3143 * Description: 3314 * Description:
@@ -3180,6 +3351,136 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
3180static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL, 3351static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
3181 lpfc_aer_cleanup_state); 3352 lpfc_aer_cleanup_state);
3182 3353
3354/**
3355 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
3356 *
3357 * @dev: class device that is converted into a Scsi_host.
3358 * @attr: device attribute, not used.
3359 * @buf: containing the string the number of vfs to be enabled.
3360 * @count: unused variable.
3361 *
3362 * Description:
3363 * When this api is called either through user sysfs, the driver shall
3364 * try to enable or disable SR-IOV virtual functions according to the
3365 * following:
3366 *
3367 * If zero virtual function has been enabled to the physical function,
3368 * the driver shall invoke the pci enable virtual function api trying
3369 * to enable the virtual functions. If the nr_vfn provided is greater
3370 * than the maximum supported, the maximum virtual function number will
3371 * be used for invoking the api; otherwise, the nr_vfn provided shall
3372 * be used for invoking the api. If the api call returned success, the
3373 * actual number of virtual functions enabled will be set to the driver
3374 * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
3375 * cfg_sriov_nr_virtfn remains zero.
3376 *
3377 * If none-zero virtual functions have already been enabled to the
3378 * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
3379 * -EINVAL will be returned and the driver does nothing;
3380 *
3381 * If the nr_vfn provided is zero and none-zero virtual functions have
3382 * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
3383 * disabling virtual function api shall be invoded to disable all the
3384 * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
3385 * zero. Otherwise, if zero virtual function has been enabled, do
3386 * nothing.
3387 *
3388 * Returns:
3389 * length of the buf on success if val is in range the intended mode
3390 * is supported.
3391 * -EINVAL if val out of range or intended mode is not supported.
3392 **/
3393static ssize_t
3394lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
3395 const char *buf, size_t count)
3396{
3397 struct Scsi_Host *shost = class_to_shost(dev);
3398 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
3399 struct lpfc_hba *phba = vport->phba;
3400 struct pci_dev *pdev = phba->pcidev;
3401 int val = 0, rc = -EINVAL;
3402
3403 /* Sanity check on user data */
3404 if (!isdigit(buf[0]))
3405 return -EINVAL;
3406 if (sscanf(buf, "%i", &val) != 1)
3407 return -EINVAL;
3408 if (val < 0)
3409 return -EINVAL;
3410
3411 /* Request disabling virtual functions */
3412 if (val == 0) {
3413 if (phba->cfg_sriov_nr_virtfn > 0) {
3414 pci_disable_sriov(pdev);
3415 phba->cfg_sriov_nr_virtfn = 0;
3416 }
3417 return strlen(buf);
3418 }
3419
3420 /* Request enabling virtual functions */
3421 if (phba->cfg_sriov_nr_virtfn > 0) {
3422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3423 "3018 There are %d virtual functions "
3424 "enabled on physical function.\n",
3425 phba->cfg_sriov_nr_virtfn);
3426 return -EEXIST;
3427 }
3428
3429 if (val <= LPFC_MAX_VFN_PER_PFN)
3430 phba->cfg_sriov_nr_virtfn = val;
3431 else {
3432 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3433 "3019 Enabling %d virtual functions is not "
3434 "allowed.\n", val);
3435 return -EINVAL;
3436 }
3437
3438 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
3439 if (rc) {
3440 phba->cfg_sriov_nr_virtfn = 0;
3441 rc = -EPERM;
3442 } else
3443 rc = strlen(buf);
3444
3445 return rc;
3446}
3447
3448static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
3449module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
3450MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
3451lpfc_param_show(sriov_nr_virtfn)
3452
3453/**
3454 * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
3455 * @phba: lpfc_hba pointer.
 3456 * @val: number of virtual functions to enable.
 3457 *
 3458 * Description:
 3459 * If val is in a valid range [0,255], then set the adapter's initial
 3460 * cfg_sriov_nr_virtfn field; otherwise -EINVAL is returned. It will be
 3461 * up to the driver's probe_one routine to determine whether the
 3462 * device's SR-IOV is supported or not.
3463 *
3464 * Returns:
3465 * zero if val saved.
3466 * -EINVAL val out of range
3467 **/
3468static int
3469lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
3470{
3471 if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
3472 phba->cfg_sriov_nr_virtfn = val;
3473 return 0;
3474 }
3475
3476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3477 "3017 Enabling %d virtual functions is not "
3478 "allowed.\n", val);
3479 return -EINVAL;
3480}
3481static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
3482 lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
3483
3183/* 3484/*
3184# lpfc_fcp_class: Determines FC class to use for the FCP protocol. 3485# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
3185# Value range is [2,3]. Default value is 3. 3486# Value range is [2,3]. Default value is 3.
@@ -3497,6 +3798,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3497 &dev_attr_lpfc_prot_sg_seg_cnt, 3798 &dev_attr_lpfc_prot_sg_seg_cnt,
3498 &dev_attr_lpfc_aer_support, 3799 &dev_attr_lpfc_aer_support,
3499 &dev_attr_lpfc_aer_state_cleanup, 3800 &dev_attr_lpfc_aer_state_cleanup,
3801 &dev_attr_lpfc_sriov_nr_virtfn,
3500 &dev_attr_lpfc_suppress_link_up, 3802 &dev_attr_lpfc_suppress_link_up,
3501 &dev_attr_lpfc_iocb_cnt, 3803 &dev_attr_lpfc_iocb_cnt,
3502 &dev_attr_iocb_hw, 3804 &dev_attr_iocb_hw,
@@ -3505,6 +3807,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3505 &dev_attr_lpfc_fips_level, 3807 &dev_attr_lpfc_fips_level,
3506 &dev_attr_lpfc_fips_rev, 3808 &dev_attr_lpfc_fips_rev,
3507 &dev_attr_lpfc_dss, 3809 &dev_attr_lpfc_dss,
3810 &dev_attr_lpfc_sriov_hw_max_virtfn,
3508 NULL, 3811 NULL,
3509}; 3812};
3510 3813
@@ -3961,7 +4264,7 @@ static struct bin_attribute sysfs_mbox_attr = {
3961 .name = "mbox", 4264 .name = "mbox",
3962 .mode = S_IRUSR | S_IWUSR, 4265 .mode = S_IRUSR | S_IWUSR,
3963 }, 4266 },
3964 .size = MAILBOX_CMD_SIZE, 4267 .size = MAILBOX_SYSFS_MAX,
3965 .read = sysfs_mbox_read, 4268 .read = sysfs_mbox_read,
3966 .write = sysfs_mbox_write, 4269 .write = sysfs_mbox_write,
3967}; 4270};
@@ -4705,6 +5008,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4705 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 5008 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
4706 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); 5009 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4707 lpfc_aer_support_init(phba, lpfc_aer_support); 5010 lpfc_aer_support_init(phba, lpfc_aer_support);
5011 lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
4708 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); 5012 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
4709 lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt); 5013 lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
4710 phba->cfg_enable_dss = 1; 5014 phba->cfg_enable_dss = 1;
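
Aside (not part of the patch): among the lpfc_attr.c additions, lpfc_sriov_nr_virtfn_store() parses the requested VF count from sysfs, disables SR-IOV when given 0, refuses a new request while VFs are already enabled, and bounds the value before calling the enable path. A minimal userspace sketch of that store-side validation; MAX_VFN and the globals are hypothetical stand-ins.

    #include <ctype.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_VFN 255    /* hypothetical bound, standing in for the driver's limit */

    static int cur_vfn;    /* stands in for the adapter's current VF count */

    /* Parse and validate a "number of virtual functions" write, mirroring the
     * order of checks in the new sysfs store handler. */
    static int nr_virtfn_store(const char *buf)
    {
        int val;

        if (!isdigit((unsigned char)buf[0]))
            return -EINVAL;
        if (sscanf(buf, "%i", &val) != 1 || val < 0)
            return -EINVAL;

        if (val == 0) {                /* request: disable all VFs */
            cur_vfn = 0;
            return (int)strlen(buf);
        }
        if (cur_vfn > 0)               /* VFs already enabled: refuse */
            return -EEXIST;
        if (val > MAX_VFN)             /* out of range */
            return -EINVAL;

        cur_vfn = val;                 /* real code would call the enable API here */
        return (int)strlen(buf);
    }

    int main(void)
    {
        printf("%d\n", nr_virtfn_store("4"));    /* enables 4 */
        printf("%d\n", nr_virtfn_store("2"));    /* refused: already enabled */
        printf("%d\n", nr_virtfn_store("0"));    /* disables */
        return 0;
    }
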
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 853e5042f39c..7fb0ba4cbfa7 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/list.h>
26 27
27#include <scsi/scsi.h> 28#include <scsi/scsi.h>
28#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
@@ -79,8 +80,7 @@ struct lpfc_bsg_iocb {
79struct lpfc_bsg_mbox { 80struct lpfc_bsg_mbox {
80 LPFC_MBOXQ_t *pmboxq; 81 LPFC_MBOXQ_t *pmboxq;
81 MAILBOX_t *mb; 82 MAILBOX_t *mb;
82 struct lpfc_dmabuf *rxbmp; /* for BIU diags */ 83 struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
83 struct lpfc_dmabufext *dmp; /* for BIU diags */
84 uint8_t *ext; /* extended mailbox data */ 84 uint8_t *ext; /* extended mailbox data */
85 uint32_t mbOffset; /* from app */ 85 uint32_t mbOffset; /* from app */
86 uint32_t inExtWLen; /* from app */ 86 uint32_t inExtWLen; /* from app */
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
332 cmd->ulpLe = 1; 332 cmd->ulpLe = 1;
333 cmd->ulpClass = CLASS3; 333 cmd->ulpClass = CLASS3;
334 cmd->ulpContext = ndlp->nlp_rpi; 334 cmd->ulpContext = ndlp->nlp_rpi;
335 if (phba->sli_rev == LPFC_SLI_REV4)
336 cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
335 cmd->ulpOwner = OWN_CHIP; 337 cmd->ulpOwner = OWN_CHIP;
336 cmdiocbq->vport = phba->pport; 338 cmdiocbq->vport = phba->pport;
337 cmdiocbq->context3 = bmp; 339 cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1336 } 1338 }
1337 1339
1338 icmd->un.ulpWord[3] = ndlp->nlp_rpi; 1340 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1341 if (phba->sli_rev == LPFC_SLI_REV4)
1342 icmd->ulpContext =
1343 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1344
1339 /* The exchange is done, mark the entry as invalid */ 1345 /* The exchange is done, mark the entry as invalid */
1340 phba->ct_ctx[tag].flags &= ~UNSOL_VALID; 1346 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1341 } else 1347 } else
@@ -1463,11 +1469,91 @@ send_mgmt_rsp_exit:
1463} 1469}
1464 1470
1465/** 1471/**
1466 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command 1472 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1473 * @phba: Pointer to HBA context object.
1467 * @job: LPFC_BSG_VENDOR_DIAG_MODE 1474 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1468 * 1475 *
1469 * This function is responsible for placing a port into diagnostic loopback 1476 * This function is responsible for preparing driver for diag loopback
1470 * mode in order to perform a diagnostic loopback test. 1477 * on device.
1478 */
1479static int
1480lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
1481{
1482 struct lpfc_vport **vports;
1483 struct Scsi_Host *shost;
1484 struct lpfc_sli *psli;
1485 struct lpfc_sli_ring *pring;
1486 int i = 0;
1487
1488 psli = &phba->sli;
1489 if (!psli)
1490 return -ENODEV;
1491
1492 pring = &psli->ring[LPFC_FCP_RING];
1493 if (!pring)
1494 return -ENODEV;
1495
1496 if ((phba->link_state == LPFC_HBA_ERROR) ||
1497 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1498 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1499 return -EACCES;
1500
1501 vports = lpfc_create_vport_work_array(phba);
1502 if (vports) {
1503 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1504 shost = lpfc_shost_from_vport(vports[i]);
1505 scsi_block_requests(shost);
1506 }
1507 lpfc_destroy_vport_work_array(phba, vports);
1508 } else {
1509 shost = lpfc_shost_from_vport(phba->pport);
1510 scsi_block_requests(shost);
1511 }
1512
1513 while (pring->txcmplq_cnt) {
1514 if (i++ > 500) /* wait up to 5 seconds */
1515 break;
1516 msleep(10);
1517 }
1518 return 0;
1519}
1520
1521/**
1522 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1523 * @phba: Pointer to HBA context object.
1524 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1525 *
1526 * This function is responsible for the driver exit processing after setting up
1527 * diag loopback mode on the device.
1528 */
1529static void
1530lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1531{
1532 struct Scsi_Host *shost;
1533 struct lpfc_vport **vports;
1534 int i;
1535
1536 vports = lpfc_create_vport_work_array(phba);
1537 if (vports) {
1538 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1539 shost = lpfc_shost_from_vport(vports[i]);
1540 scsi_unblock_requests(shost);
1541 }
1542 lpfc_destroy_vport_work_array(phba, vports);
1543 } else {
1544 shost = lpfc_shost_from_vport(phba->pport);
1545 scsi_unblock_requests(shost);
1546 }
1547 return;
1548}
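A minimal sketch of how the new enter/exit pair is intended to bracket a diagnostic action (the helper name example_diag_action is hypothetical and not part of this patch; the sli3/sli4 loopback routines below follow this exact pattern):

static int example_diag_action(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	int rc;

	/* block SCSI requests on all vports and drain the FCP txcmpl queue */
	rc = lpfc_bsg_diag_mode_enter(phba, job);
	if (rc)
		return rc;

	/* ... issue the diagnostic mailbox command(s) here ... */

	/* unblock SCSI requests on all vports */
	lpfc_bsg_diag_mode_exit(phba);
	return 0;
}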
1549
1550/**
1551 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1552 * @phba: Pointer to HBA context object.
1553 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1554 *
1555 * This function is responsible for placing an sli3 port into diagnostic
1556 * loopback mode in order to perform a diagnostic loopback test.
1471 * All new scsi requests are blocked, a small delay is used to allow the 1557 * All new scsi requests are blocked, a small delay is used to allow the
1472 * scsi requests to complete then the link is brought down. If the link is 1558 * scsi requests to complete then the link is brought down. If the link is
1473 * is placed in loopback mode then scsi requests are again allowed 1559 * is placed in loopback mode then scsi requests are again allowed
@@ -1475,17 +1561,11 @@ send_mgmt_rsp_exit:
1475 * All of this is done in-line. 1561 * All of this is done in-line.
1476 */ 1562 */
1477static int 1563static int
1478lpfc_bsg_diag_mode(struct fc_bsg_job *job) 1564lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1479{ 1565{
1480 struct Scsi_Host *shost = job->shost;
1481 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1482 struct lpfc_hba *phba = vport->phba;
1483 struct diag_mode_set *loopback_mode; 1566 struct diag_mode_set *loopback_mode;
1484 struct lpfc_sli *psli = &phba->sli;
1485 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1486 uint32_t link_flags; 1567 uint32_t link_flags;
1487 uint32_t timeout; 1568 uint32_t timeout;
1488 struct lpfc_vport **vports;
1489 LPFC_MBOXQ_t *pmboxq; 1569 LPFC_MBOXQ_t *pmboxq;
1490 int mbxstatus; 1570 int mbxstatus;
1491 int i = 0; 1571 int i = 0;
@@ -1494,53 +1574,33 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1494 /* no data to return just the return code */ 1574 /* no data to return just the return code */
1495 job->reply->reply_payload_rcv_len = 0; 1575 job->reply->reply_payload_rcv_len = 0;
1496 1576
1497 if (job->request_len < 1577 if (job->request_len < sizeof(struct fc_bsg_request) +
1498 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) { 1578 sizeof(struct diag_mode_set)) {
1499 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1579 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1500 "2738 Received DIAG MODE request below minimum " 1580 "2738 Received DIAG MODE request size:%d "
1501 "size\n"); 1581 "below the minimum size:%d\n",
1582 job->request_len,
1583 (int)(sizeof(struct fc_bsg_request) +
1584 sizeof(struct diag_mode_set)));
1502 rc = -EINVAL; 1585 rc = -EINVAL;
1503 goto job_error; 1586 goto job_error;
1504 } 1587 }
1505 1588
1589 rc = lpfc_bsg_diag_mode_enter(phba, job);
1590 if (rc)
1591 goto job_error;
1592
1593 /* bring the link to diagnostic mode */
1506 loopback_mode = (struct diag_mode_set *) 1594 loopback_mode = (struct diag_mode_set *)
1507 job->request->rqst_data.h_vendor.vendor_cmd; 1595 job->request->rqst_data.h_vendor.vendor_cmd;
1508 link_flags = loopback_mode->type; 1596 link_flags = loopback_mode->type;
1509 timeout = loopback_mode->timeout * 100; 1597 timeout = loopback_mode->timeout * 100;
1510 1598
1511 if ((phba->link_state == LPFC_HBA_ERROR) ||
1512 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1513 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
1514 rc = -EACCES;
1515 goto job_error;
1516 }
1517
1518 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1599 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1519 if (!pmboxq) { 1600 if (!pmboxq) {
1520 rc = -ENOMEM; 1601 rc = -ENOMEM;
1521 goto job_error; 1602 goto loopback_mode_exit;
1522 }
1523
1524 vports = lpfc_create_vport_work_array(phba);
1525 if (vports) {
1526 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1527 shost = lpfc_shost_from_vport(vports[i]);
1528 scsi_block_requests(shost);
1529 }
1530
1531 lpfc_destroy_vport_work_array(phba, vports);
1532 } else {
1533 shost = lpfc_shost_from_vport(phba->pport);
1534 scsi_block_requests(shost);
1535 } 1603 }
1536
1537 while (pring->txcmplq_cnt) {
1538 if (i++ > 500) /* wait up to 5 seconds */
1539 break;
1540
1541 msleep(10);
1542 }
1543
1544 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 1604 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1545 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; 1605 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1546 pmboxq->u.mb.mbxOwner = OWN_HOST; 1606 pmboxq->u.mb.mbxOwner = OWN_HOST;
@@ -1594,22 +1654,186 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1594 rc = -ENODEV; 1654 rc = -ENODEV;
1595 1655
1596loopback_mode_exit: 1656loopback_mode_exit:
1597 vports = lpfc_create_vport_work_array(phba); 1657 lpfc_bsg_diag_mode_exit(phba);
1598 if (vports) { 1658
1599 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1659 /*
1600 shost = lpfc_shost_from_vport(vports[i]); 1660 * Let SLI layer release mboxq if mbox command completed after timeout.
1601 scsi_unblock_requests(shost); 1661 */
1662 if (mbxstatus != MBX_TIMEOUT)
1663 mempool_free(pmboxq, phba->mbox_mem_pool);
1664
1665job_error:
1666 /* make error code available to userspace */
1667 job->reply->result = rc;
1668 /* complete the job back to userspace if no error */
1669 if (rc == 0)
1670 job->job_done(job);
1671 return rc;
1672}
1673
1674/**
1675 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1676 * @phba: Pointer to HBA context object.
1677 * @diag: Flag to set the link to diag or normal operation state.
1678 *
1679 * This function is responsible for issuing a sli4 mailbox command for setting
1680 * link to either diag state or normal operation state.
1681 */
1682static int
1683lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1684{
1685 LPFC_MBOXQ_t *pmboxq;
1686 struct lpfc_mbx_set_link_diag_state *link_diag_state;
1687 uint32_t req_len, alloc_len;
1688 int mbxstatus = MBX_SUCCESS, rc;
1689
1690 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1691 if (!pmboxq)
1692 return -ENOMEM;
1693
1694 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1695 sizeof(struct lpfc_sli4_cfg_mhdr));
1696 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1697 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1698 req_len, LPFC_SLI4_MBX_EMBED);
1699 if (alloc_len != req_len) {
1700 rc = -ENOMEM;
1701 goto link_diag_state_set_out;
1702 }
1703 link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1704 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1705 phba->sli4_hba.link_state.number);
1706 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1707 phba->sli4_hba.link_state.type);
1708 if (diag)
1709 bf_set(lpfc_mbx_set_diag_state_diag,
1710 &link_diag_state->u.req, 1);
1711 else
1712 bf_set(lpfc_mbx_set_diag_state_diag,
1713 &link_diag_state->u.req, 0);
1714
1715 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1716
1717 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1718 rc = 0;
1719 else
1720 rc = -ENODEV;
1721
1722link_diag_state_set_out:
1723 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1724 mempool_free(pmboxq, phba->mbox_mem_pool);
1725
1726 return rc;
1727}
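A minimal sketch (hypothetical helper name, not taken from this patch) of the set/clear pairing this mailbox wrapper enables; lpfc_sli4_bsg_diag_loopback_mode and lpfc_sli4_bsg_diag_mode_end below use it in exactly this way:

static int example_sli4_diag_sequence(struct lpfc_hba *phba)
{
	int rc;

	/* place the link in diagnostic state */
	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
	if (rc)
		return rc;

	/* ... configure loopback or run a link diag test here ... */

	/* restore the link to normal operation state */
	return lpfc_sli4_bsg_set_link_diag_state(phba, 0);
}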
1728
1729/**
1730 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
1731 * @phba: Pointer to HBA context object.
1732 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1733 *
1734 * This function is responsible for placing an sli4 port into diagnostic
1735 * loopback mode in order to perform a diagnostic loopback test.
1736 */
1737static int
1738lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1739{
1740 struct diag_mode_set *loopback_mode;
1741 uint32_t link_flags, timeout, req_len, alloc_len;
1742 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1743 LPFC_MBOXQ_t *pmboxq = NULL;
1744 int mbxstatus, i, rc = 0;
1745
1746 /* no data to return just the return code */
1747 job->reply->reply_payload_rcv_len = 0;
1748
1749 if (job->request_len < sizeof(struct fc_bsg_request) +
1750 sizeof(struct diag_mode_set)) {
1751 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1752 "3011 Received DIAG MODE request size:%d "
1753 "below the minimum size:%d\n",
1754 job->request_len,
1755 (int)(sizeof(struct fc_bsg_request) +
1756 sizeof(struct diag_mode_set)));
1757 rc = -EINVAL;
1758 goto job_error;
1759 }
1760
1761 rc = lpfc_bsg_diag_mode_enter(phba, job);
1762 if (rc)
1763 goto job_error;
1764
1765 /* bring the link to diagnostic mode */
1766 loopback_mode = (struct diag_mode_set *)
1767 job->request->rqst_data.h_vendor.vendor_cmd;
1768 link_flags = loopback_mode->type;
1769 timeout = loopback_mode->timeout * 100;
1770
1771 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1772 if (rc)
1773 goto loopback_mode_exit;
1774
1775 /* wait for link down before proceeding */
1776 i = 0;
1777 while (phba->link_state != LPFC_LINK_DOWN) {
1778 if (i++ > timeout) {
1779 rc = -ETIMEDOUT;
1780 goto loopback_mode_exit;
1781 }
1782 msleep(10);
1783 }
1784 /* set up loopback mode */
1785 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1786 if (!pmboxq) {
1787 rc = -ENOMEM;
1788 goto loopback_mode_exit;
1789 }
1790 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1791 sizeof(struct lpfc_sli4_cfg_mhdr));
1792 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1793 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1794 req_len, LPFC_SLI4_MBX_EMBED);
1795 if (alloc_len != req_len) {
1796 rc = -ENOMEM;
1797 goto loopback_mode_exit;
1798 }
1799 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1800 bf_set(lpfc_mbx_set_diag_state_link_num,
1801 &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
1802 bf_set(lpfc_mbx_set_diag_state_link_type,
1803 &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
1804 if (link_flags == INTERNAL_LOOP_BACK)
1805 bf_set(lpfc_mbx_set_diag_lpbk_type,
1806 &link_diag_loopback->u.req,
1807 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1808 else
1809 bf_set(lpfc_mbx_set_diag_lpbk_type,
1810 &link_diag_loopback->u.req,
1811 LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
1812
1813 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1814 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1815 rc = -ENODEV;
1816 else {
1817 phba->link_flag |= LS_LOOPBACK_MODE;
1818 /* wait for the link attention interrupt */
1819 msleep(100);
1820 i = 0;
1821 while (phba->link_state != LPFC_HBA_READY) {
1822 if (i++ > timeout) {
1823 rc = -ETIMEDOUT;
1824 break;
1825 }
1826 msleep(10);
1602 } 1827 }
1603 lpfc_destroy_vport_work_array(phba, vports);
1604 } else {
1605 shost = lpfc_shost_from_vport(phba->pport);
1606 scsi_unblock_requests(shost);
1607 } 1828 }
1608 1829
1830loopback_mode_exit:
1831 lpfc_bsg_diag_mode_exit(phba);
1832
1609 /* 1833 /*
1610 * Let SLI layer release mboxq if mbox command completed after timeout. 1834 * Let SLI layer release mboxq if mbox command completed after timeout.
1611 */ 1835 */
1612 if (mbxstatus != MBX_TIMEOUT) 1836 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1613 mempool_free(pmboxq, phba->mbox_mem_pool); 1837 mempool_free(pmboxq, phba->mbox_mem_pool);
1614 1838
1615job_error: 1839job_error:
@@ -1622,6 +1846,234 @@ job_error:
1622} 1846}
1623 1847
1624/** 1848/**
1849 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
1850 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1851 *
1852 * This function is responsible for checking and dispatching the bsg diag
1853 * command from the user to the proper driver action routine.
1854 */
1855static int
1856lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
1857{
1858 struct Scsi_Host *shost;
1859 struct lpfc_vport *vport;
1860 struct lpfc_hba *phba;
1861 int rc;
1862
1863 shost = job->shost;
1864 if (!shost)
1865 return -ENODEV;
1866 vport = (struct lpfc_vport *)job->shost->hostdata;
1867 if (!vport)
1868 return -ENODEV;
1869 phba = vport->phba;
1870 if (!phba)
1871 return -ENODEV;
1872
1873 if (phba->sli_rev < LPFC_SLI_REV4)
1874 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
1875 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1876 LPFC_SLI_INTF_IF_TYPE_2)
1877 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
1878 else
1879 rc = -ENODEV;
1880
1881 return rc;
1882
1883}
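The dispatch above encodes one support rule; a hypothetical predicate (not in the patch) that states it explicitly:

static bool example_port_supports_sli4_diag(struct lpfc_hba *phba)
{
	/* SLI-3 ports use the legacy loopback path */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;
	/* SLI-4 diag loopback is only supported on if_type 2 ports */
	return bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	       LPFC_SLI_INTF_IF_TYPE_2;
}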
1884
1885/**
1886 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
1887 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
1888 *
1889 * This function is responsible for checking and dispatching the bsg diag
1890 * command from the user to the proper driver action routine.
1891 */
1892static int
1893lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
1894{
1895 struct Scsi_Host *shost;
1896 struct lpfc_vport *vport;
1897 struct lpfc_hba *phba;
1898 int rc;
1899
1900 shost = job->shost;
1901 if (!shost)
1902 return -ENODEV;
1903 vport = (struct lpfc_vport *)job->shost->hostdata;
1904 if (!vport)
1905 return -ENODEV;
1906 phba = vport->phba;
1907 if (!phba)
1908 return -ENODEV;
1909
1910 if (phba->sli_rev < LPFC_SLI_REV4)
1911 return -ENODEV;
1912 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1913 LPFC_SLI_INTF_IF_TYPE_2)
1914 return -ENODEV;
1915
1916 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
1917
1918 if (!rc)
1919 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1920
1921 return rc;
1922}
1923
1924/**
1925 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
1926 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
1927 *
1928 * This function performs an SLI4 diag link test request from the user
1929 * application.
1930 */
1931static int
1932lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
1933{
1934 struct Scsi_Host *shost;
1935 struct lpfc_vport *vport;
1936 struct lpfc_hba *phba;
1937 LPFC_MBOXQ_t *pmboxq;
1938 struct sli4_link_diag *link_diag_test_cmd;
1939 uint32_t req_len, alloc_len;
1940 uint32_t timeout;
1941 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
1942 union lpfc_sli4_cfg_shdr *shdr;
1943 uint32_t shdr_status, shdr_add_status;
1944 struct diag_status *diag_status_reply;
1945 int mbxstatus, rc = 0;
1946
1947 shost = job->shost;
1948 if (!shost) {
1949 rc = -ENODEV;
1950 goto job_error;
1951 }
1952 vport = (struct lpfc_vport *)job->shost->hostdata;
1953 if (!vport) {
1954 rc = -ENODEV;
1955 goto job_error;
1956 }
1957 phba = vport->phba;
1958 if (!phba) {
1959 rc = -ENODEV;
1960 goto job_error;
1961 }
1962
1963 if (phba->sli_rev < LPFC_SLI_REV4) {
1964 rc = -ENODEV;
1965 goto job_error;
1966 }
1967 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1968 LPFC_SLI_INTF_IF_TYPE_2) {
1969 rc = -ENODEV;
1970 goto job_error;
1971 }
1972
1973 if (job->request_len < sizeof(struct fc_bsg_request) +
1974 sizeof(struct sli4_link_diag)) {
1975 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1976 "3013 Received LINK DIAG TEST request "
1977 " size:%d below the minimum size:%d\n",
1978 job->request_len,
1979 (int)(sizeof(struct fc_bsg_request) +
1980 sizeof(struct sli4_link_diag)));
1981 rc = -EINVAL;
1982 goto job_error;
1983 }
1984
1985 rc = lpfc_bsg_diag_mode_enter(phba, job);
1986 if (rc)
1987 goto job_error;
1988
1989 link_diag_test_cmd = (struct sli4_link_diag *)
1990 job->request->rqst_data.h_vendor.vendor_cmd;
1991 timeout = link_diag_test_cmd->timeout * 100;
1992
1993 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1994
1995 if (rc)
1996 goto job_error;
1997
1998 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1999 if (!pmboxq) {
2000 rc = -ENOMEM;
2001 goto link_diag_test_exit;
2002 }
2003
2004 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2005 sizeof(struct lpfc_sli4_cfg_mhdr));
2006 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2007 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2008 req_len, LPFC_SLI4_MBX_EMBED);
2009 if (alloc_len != req_len) {
2010 rc = -ENOMEM;
2011 goto link_diag_test_exit;
2012 }
2013 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2014 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2015 phba->sli4_hba.link_state.number);
2016 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2017 phba->sli4_hba.link_state.type);
2018 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2019 link_diag_test_cmd->test_id);
2020 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2021 link_diag_test_cmd->loops);
2022 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2023 link_diag_test_cmd->test_version);
2024 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2025 link_diag_test_cmd->error_action);
2026
2027 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2028
2029 shdr = (union lpfc_sli4_cfg_shdr *)
2030 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2031 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2032 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2033 if (shdr_status || shdr_add_status || mbxstatus) {
2034 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2035 "3010 Run link diag test mailbox failed with "
2036 "mbx_status x%x status x%x, add_status x%x\n",
2037 mbxstatus, shdr_status, shdr_add_status);
2038 }
2039
2040 diag_status_reply = (struct diag_status *)
2041 job->reply->reply_data.vendor_reply.vendor_rsp;
2042
2043 if (job->reply_len <
2044 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2045 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2046 "3012 Received Run link diag test reply "
2047 "below minimum size (%d): reply_len:%d\n",
2048 (int)(sizeof(struct fc_bsg_request) +
2049 sizeof(struct diag_status)),
2050 job->reply_len);
2051 rc = -EINVAL;
2052 goto job_error;
2053 }
2054
2055 diag_status_reply->mbox_status = mbxstatus;
2056 diag_status_reply->shdr_status = shdr_status;
2057 diag_status_reply->shdr_add_status = shdr_add_status;
2058
2059link_diag_test_exit:
2060 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2061
2062 if (pmboxq)
2063 mempool_free(pmboxq, phba->mbox_mem_pool);
2064
2065 lpfc_bsg_diag_mode_exit(phba);
2066
2067job_error:
2068 /* make error code available to userspace */
2069 job->reply->result = rc;
2070 /* complete the job back to userspace if no error */
2071 if (rc == 0)
2072 job->job_done(job);
2073 return rc;
2074}
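The request/reply length checks repeated in these handlers follow one pattern; a hypothetical helper (not in the patch) capturing it, assuming the fixed payload sizes are passed in by the caller:

static int example_check_bsg_lengths(struct fc_bsg_job *job,
				     size_t req_payload, size_t rsp_payload)
{
	/* the vendor request must carry at least its fixed-size payload */
	if (job->request_len < sizeof(struct fc_bsg_request) + req_payload)
		return -EINVAL;
	/* the reply buffer must be large enough for the status payload */
	if (job->reply_len < sizeof(struct fc_bsg_request) + rsp_payload)
		return -EINVAL;
	return 0;
}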
2075
2076/**
1625 * lpfcdiag_loop_self_reg - obtains a remote port login id 2077 * lpfcdiag_loop_self_reg - obtains a remote port login id
1626 * @phba: Pointer to HBA context object 2078 * @phba: Pointer to HBA context object
1627 * @rpi: Pointer to a remote port login id 2079 * @rpi: Pointer to a remote port login id
@@ -1851,6 +2303,86 @@ err_get_xri_exit:
1851} 2303}
1852 2304
1853/** 2305/**
2306 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page-sized dma buffer
2307 * @phba: Pointer to HBA context object
2308 *
2309 * This function allocates a BSG_MBOX_SIZE (4KB) page-sized dma buffer and
2310 * returns a pointer to the buffer.
2311 **/
2312static struct lpfc_dmabuf *
2313lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2314{
2315 struct lpfc_dmabuf *dmabuf;
2316 struct pci_dev *pcidev = phba->pcidev;
2317
2318 /* allocate dma buffer struct */
2319 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2320 if (!dmabuf)
2321 return NULL;
2322
2323 INIT_LIST_HEAD(&dmabuf->list);
2324
2325 /* now, allocate dma buffer */
2326 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2327 &(dmabuf->phys), GFP_KERNEL);
2328
2329 if (!dmabuf->virt) {
2330 kfree(dmabuf);
2331 return NULL;
2332 }
2333 memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2334
2335 return dmabuf;
2336}
2337
2338/**
2339 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2340 * @phba: Pointer to HBA context object.
2341 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2342 *
2343 * This routine simply frees a dma buffer and its associated buffer
2344 * descriptor referred to by @dmabuf.
2345 **/
2346static void
2347lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2348{
2349 struct pci_dev *pcidev = phba->pcidev;
2350
2351 if (!dmabuf)
2352 return;
2353
2354 if (dmabuf->virt)
2355 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2356 dmabuf->virt, dmabuf->phys);
2357 kfree(dmabuf);
2358 return;
2359}
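A minimal usage sketch (hypothetical caller, not part of this patch) pairing the two helpers above:

static int example_use_bsg_dma_page(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf;

	/* one zeroed, BSG_MBOX_SIZE coherent DMA page plus its descriptor */
	dmabuf = lpfc_bsg_dma_page_alloc(phba);
	if (!dmabuf)
		return -ENOMEM;

	/* ... stage a mailbox command or external buffer in dmabuf->virt ... */

	lpfc_bsg_dma_page_free(phba, dmabuf);
	return 0;
}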
2360
2361/**
2362 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2363 * @phba: Pointer to HBA context object.
2364 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2365 *
2366 * This routine simply frees all dma buffers and their associated buffer
2367 * descriptors referred to by @dmabuf_list.
2368 **/
2369static void
2370lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2371 struct list_head *dmabuf_list)
2372{
2373 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2374
2375 if (list_empty(dmabuf_list))
2376 return;
2377
2378 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2379 list_del_init(&dmabuf->list);
2380 lpfc_bsg_dma_page_free(phba, dmabuf);
2381 }
2382 return;
2383}
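A sketch (hypothetical helper, not part of this patch) of building and tearing down such a list, mirroring what lpfc_bsg_sli_cfg_read_cmd_ext does further down for the additional read buffers:

static int example_alloc_ext_buffer_list(struct lpfc_hba *phba,
					 struct list_head *dmabuf_list, int cnt)
{
	struct lpfc_dmabuf *ext_dmabuf;
	int i;

	for (i = 0; i < cnt; i++) {
		ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
		if (!ext_dmabuf) {
			/* roll back whatever was already allocated */
			lpfc_bsg_dma_page_list_free(phba, dmabuf_list);
			return -ENOMEM;
		}
		list_add_tail(&ext_dmabuf->list, dmabuf_list);
	}
	return 0;
}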
2384
2385/**
1854 * diag_cmd_data_alloc - fills in a bde struct with dma buffers 2386 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1855 * @phba: Pointer to HBA context object 2387 * @phba: Pointer to HBA context object
1856 * @bpl: Pointer to 64 bit bde structure 2388 * @bpl: Pointer to 64 bit bde structure
@@ -2067,7 +2599,7 @@ err_post_rxbufs_exit:
2067} 2599}
2068 2600
2069/** 2601/**
2070 * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself 2602 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
2071 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job 2603 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2072 * 2604 *
2073 * This function receives a user data buffer to be transmitted and received on 2605 * This function receives a user data buffer to be transmitted and received on
@@ -2086,7 +2618,7 @@ err_post_rxbufs_exit:
2086 * of loopback mode. 2618 * of loopback mode.
2087 **/ 2619 **/
2088static int 2620static int
2089lpfc_bsg_diag_test(struct fc_bsg_job *job) 2621lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2090{ 2622{
2091 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 2623 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2092 struct lpfc_hba *phba = vport->phba; 2624 struct lpfc_hba *phba = vport->phba;
@@ -2411,7 +2943,7 @@ job_error:
2411} 2943}
2412 2944
2413/** 2945/**
2414 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler 2946 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
2415 * @phba: Pointer to HBA context object. 2947 * @phba: Pointer to HBA context object.
2416 * @pmboxq: Pointer to mailbox command. 2948 * @pmboxq: Pointer to mailbox command.
2417 * 2949 *
@@ -2422,15 +2954,13 @@ job_error:
2422 * of the mailbox. 2954 * of the mailbox.
2423 **/ 2955 **/
2424void 2956void
2425lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2957lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2426{ 2958{
2427 struct bsg_job_data *dd_data; 2959 struct bsg_job_data *dd_data;
2428 struct fc_bsg_job *job; 2960 struct fc_bsg_job *job;
2429 struct lpfc_mbx_nembed_cmd *nembed_sge;
2430 uint32_t size; 2961 uint32_t size;
2431 unsigned long flags; 2962 unsigned long flags;
2432 uint8_t *to; 2963 uint8_t *pmb, *pmb_buf;
2433 uint8_t *from;
2434 2964
2435 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2965 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2436 dd_data = pmboxq->context1; 2966 dd_data = pmboxq->context1;
@@ -2440,62 +2970,21 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2440 return; 2970 return;
2441 } 2971 }
2442 2972
2443 /* build the outgoing buffer to do an sg copy 2973 /*
2444 * the format is the response mailbox followed by any extended 2974 * The outgoing buffer is already referenced from the dma buffer;
2445 * mailbox data 2975 * only the header needs to be copied from the mailboxq structure.
2446 */ 2976 */
2447 from = (uint8_t *)&pmboxq->u.mb; 2977 pmb = (uint8_t *)&pmboxq->u.mb;
2448 to = (uint8_t *)dd_data->context_un.mbox.mb; 2978 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
2449 memcpy(to, from, sizeof(MAILBOX_t)); 2979 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
2450 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
2451 /* copy the extended data if any, count is in words */
2452 if (dd_data->context_un.mbox.outExtWLen) {
2453 from = (uint8_t *)dd_data->context_un.mbox.ext;
2454 to += sizeof(MAILBOX_t);
2455 size = dd_data->context_un.mbox.outExtWLen *
2456 sizeof(uint32_t);
2457 memcpy(to, from, size);
2458 } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
2459 from = (uint8_t *)dd_data->context_un.mbox.
2460 dmp->dma.virt;
2461 to += sizeof(MAILBOX_t);
2462 size = dd_data->context_un.mbox.dmp->size;
2463 memcpy(to, from, size);
2464 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2465 (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
2466 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2467 virt;
2468 to += sizeof(MAILBOX_t);
2469 size = pmboxq->u.mb.un.varWords[5];
2470 memcpy(to, from, size);
2471 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2472 (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
2473 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
2474 &pmboxq->u.mb.un.varWords[0];
2475
2476 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2477 virt;
2478 to += sizeof(MAILBOX_t);
2479 size = nembed_sge->sge[0].length;
2480 memcpy(to, from, size);
2481 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
2482 from = (uint8_t *)dd_data->context_un.
2483 mbox.dmp->dma.virt;
2484 to += sizeof(MAILBOX_t);
2485 size = dd_data->context_un.mbox.dmp->size;
2486 memcpy(to, from, size);
2487 }
2488 }
2489 2980
2490 from = (uint8_t *)dd_data->context_un.mbox.mb;
2491 job = dd_data->context_un.mbox.set_job; 2981 job = dd_data->context_un.mbox.set_job;
2492 if (job) { 2982 if (job) {
2493 size = job->reply_payload.payload_len; 2983 size = job->reply_payload.payload_len;
2494 job->reply->reply_payload_rcv_len = 2984 job->reply->reply_payload_rcv_len =
2495 sg_copy_from_buffer(job->reply_payload.sg_list, 2985 sg_copy_from_buffer(job->reply_payload.sg_list,
2496 job->reply_payload.sg_cnt, 2986 job->reply_payload.sg_cnt,
2497 from, size); 2987 pmb_buf, size);
2498 job->reply->result = 0;
2499 /* need to hold the lock until we set job->dd_data to NULL 2988 /* need to hold the lock until we set job->dd_data to NULL
2500 * to hold off the timeout handler returning to the mid-layer 2989 * to hold off the timeout handler returning to the mid-layer
2501 * while we are still processing the job. 2990 * while we are still processing the job.
@@ -2503,28 +2992,19 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2503 job->dd_data = NULL; 2992 job->dd_data = NULL;
2504 dd_data->context_un.mbox.set_job = NULL; 2993 dd_data->context_un.mbox.set_job = NULL;
2505 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2994 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2506 job->job_done(job);
2507 } else { 2995 } else {
2508 dd_data->context_un.mbox.set_job = NULL; 2996 dd_data->context_un.mbox.set_job = NULL;
2509 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2997 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2510 } 2998 }
2511 2999
2512 kfree(dd_data->context_un.mbox.mb);
2513 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 3000 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2514 kfree(dd_data->context_un.mbox.ext); 3001 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
2515 if (dd_data->context_un.mbox.dmp) {
2516 dma_free_coherent(&phba->pcidev->dev,
2517 dd_data->context_un.mbox.dmp->size,
2518 dd_data->context_un.mbox.dmp->dma.virt,
2519 dd_data->context_un.mbox.dmp->dma.phys);
2520 kfree(dd_data->context_un.mbox.dmp);
2521 }
2522 if (dd_data->context_un.mbox.rxbmp) {
2523 lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
2524 dd_data->context_un.mbox.rxbmp->phys);
2525 kfree(dd_data->context_un.mbox.rxbmp);
2526 }
2527 kfree(dd_data); 3002 kfree(dd_data);
3003
3004 if (job) {
3005 job->reply->result = 0;
3006 job->job_done(job);
3007 }
2528 return; 3008 return;
2529} 3009}
2530 3010
@@ -2619,6 +3099,1006 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2619} 3099}
2620 3100
2621/** 3101/**
3102 * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
3103 * @phba: Pointer to HBA context object.
3104 *
3105 * This routine cleans up and resets the BSG handling of a multi-buffer mbox
3106 * command session.
3107 **/
3108static void
3109lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3110{
3111 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3112 return;
3113
3114 /* free all memory, including dma buffers */
3115 lpfc_bsg_dma_page_list_free(phba,
3116 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3117 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3118 /* multi-buffer write mailbox command pass-through complete */
3119 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3120 sizeof(struct lpfc_mbox_ext_buf_ctx));
3121 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3122
3123 return;
3124}
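The session states referenced by the multi-buffer code that follows appear to move through roughly this life cycle (a reading of this patch, not a definitive state machine):

/*
 * LPFC_BSG_MBOX_IDLE  - no multi-buffer pass-through session in progress
 * LPFC_BSG_MBOX_HOST  - host side is staging/serving external buffers
 * LPFC_BSG_MBOX_PORT  - the mailbox command has been issued to the port
 * LPFC_BSG_MBOX_DONE  - completion handled; remaining read buffers may
 *                       still be fetched before the session is reset
 * LPFC_BSG_MBOX_ABTS  - abort requested; the session is reset once the
 *                       port no longer owns the command
 */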
3125
3126/**
3127 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3128 * @phba: Pointer to HBA context object.
3129 * @pmboxq: Pointer to mailbox command.
3130 *
3131 * This routine handles BSG job completion for mailbox commands with
3132 * multiple external buffers.
3133 **/
3134static struct fc_bsg_job *
3135lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3136{
3137 struct bsg_job_data *dd_data;
3138 struct fc_bsg_job *job;
3139 uint8_t *pmb, *pmb_buf;
3140 unsigned long flags;
3141 uint32_t size;
3142 int rc = 0;
3143
3144 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3145 dd_data = pmboxq->context1;
3146 /* has the job already timed out? */
3147 if (!dd_data) {
3148 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3149 job = NULL;
3150 goto job_done_out;
3151 }
3152
3153 /*
3154 * The outgoing buffer is already referenced from the dma buffer;
3155 * only the header needs to be copied from the mailboxq structure.
3156 */
3157 pmb = (uint8_t *)&pmboxq->u.mb;
3158 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3159 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3160
3161 job = dd_data->context_un.mbox.set_job;
3162 if (job) {
3163 size = job->reply_payload.payload_len;
3164 job->reply->reply_payload_rcv_len =
3165 sg_copy_from_buffer(job->reply_payload.sg_list,
3166 job->reply_payload.sg_cnt,
3167 pmb_buf, size);
3168 /* result for successful */
3169 job->reply->result = 0;
3170 job->dd_data = NULL;
3171 		/* need to hold the lock until we set job->dd_data to NULL
3172 		 * to hold off the timeout handler in the midlayer from taking
3173 		 * any action.
3174 */
3175 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3176 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3177 				"2937 SLI_CONFIG ext-buffer mailbox command "
3178 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3179 phba->mbox_ext_buf_ctx.nembType,
3180 phba->mbox_ext_buf_ctx.mboxType, size);
3181 } else
3182 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3183
3184job_done_out:
3185 if (!job)
3186 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3187 				"2938 SLI_CONFIG ext-buffer mailbox "
3188 "command (x%x/x%x) failure, rc:x%x\n",
3189 phba->mbox_ext_buf_ctx.nembType,
3190 phba->mbox_ext_buf_ctx.mboxType, rc);
3191 /* state change */
3192 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3193 kfree(dd_data);
3194
3195 return job;
3196}
3197
3198/**
3199 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3200 * @phba: Pointer to HBA context object.
3201 * @pmboxq: Pointer to mailbox command.
3202 *
3203 * This is completion handler function for mailbox read commands with multiple
3204 * external buffers.
3205 **/
3206static void
3207lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3208{
3209 struct fc_bsg_job *job;
3210
3211 /* handle the BSG job with mailbox command */
3212 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3213 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3214
3215 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3216 			"2939 SLI_CONFIG ext-buffer rd mailbox command "
3217 "complete, ctxState:x%x, mbxStatus:x%x\n",
3218 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3219
3220 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3221
3222 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3223 lpfc_bsg_mbox_ext_session_reset(phba);
3224
3225 /* free base driver mailbox structure memory */
3226 mempool_free(pmboxq, phba->mbox_mem_pool);
3227
3228 /* complete the bsg job if we have it */
3229 if (job)
3230 job->job_done(job);
3231
3232 return;
3233}
3234
3235/**
3236 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3237 * @phba: Pointer to HBA context object.
3238 * @pmboxq: Pointer to mailbox command.
3239 *
3240 * This is completion handler function for mailbox write commands with multiple
3241 * external buffers.
3242 **/
3243static void
3244lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3245{
3246 struct fc_bsg_job *job;
3247
3248 /* handle the BSG job with the mailbox command */
3249 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3250 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3251
3252 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3253 			"2940 SLI_CONFIG ext-buffer wr mailbox command "
3254 "complete, ctxState:x%x, mbxStatus:x%x\n",
3255 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3256
3257 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3258
3259 /* free all memory, including dma buffers */
3260 mempool_free(pmboxq, phba->mbox_mem_pool);
3261 lpfc_bsg_mbox_ext_session_reset(phba);
3262
3263 /* complete the bsg job if we have it */
3264 if (job)
3265 job->job_done(job);
3266
3267 return;
3268}
3269
3270static void
3271lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3272 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3273 struct lpfc_dmabuf *ext_dmabuf)
3274{
3275 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3276
3277 /* pointer to the start of mailbox command */
3278 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3279
3280 if (nemb_tp == nemb_mse) {
3281 if (index == 0) {
3282 sli_cfg_mbx->un.sli_config_emb0_subsys.
3283 mse[index].pa_hi =
3284 putPaddrHigh(mbx_dmabuf->phys +
3285 sizeof(MAILBOX_t));
3286 sli_cfg_mbx->un.sli_config_emb0_subsys.
3287 mse[index].pa_lo =
3288 putPaddrLow(mbx_dmabuf->phys +
3289 sizeof(MAILBOX_t));
3290 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3291 "2943 SLI_CONFIG(mse)[%d], "
3292 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3293 index,
3294 sli_cfg_mbx->un.sli_config_emb0_subsys.
3295 mse[index].buf_len,
3296 sli_cfg_mbx->un.sli_config_emb0_subsys.
3297 mse[index].pa_hi,
3298 sli_cfg_mbx->un.sli_config_emb0_subsys.
3299 mse[index].pa_lo);
3300 } else {
3301 sli_cfg_mbx->un.sli_config_emb0_subsys.
3302 mse[index].pa_hi =
3303 putPaddrHigh(ext_dmabuf->phys);
3304 sli_cfg_mbx->un.sli_config_emb0_subsys.
3305 mse[index].pa_lo =
3306 putPaddrLow(ext_dmabuf->phys);
3307 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3308 "2944 SLI_CONFIG(mse)[%d], "
3309 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3310 index,
3311 sli_cfg_mbx->un.sli_config_emb0_subsys.
3312 mse[index].buf_len,
3313 sli_cfg_mbx->un.sli_config_emb0_subsys.
3314 mse[index].pa_hi,
3315 sli_cfg_mbx->un.sli_config_emb0_subsys.
3316 mse[index].pa_lo);
3317 }
3318 } else {
3319 if (index == 0) {
3320 sli_cfg_mbx->un.sli_config_emb1_subsys.
3321 hbd[index].pa_hi =
3322 putPaddrHigh(mbx_dmabuf->phys +
3323 sizeof(MAILBOX_t));
3324 sli_cfg_mbx->un.sli_config_emb1_subsys.
3325 hbd[index].pa_lo =
3326 putPaddrLow(mbx_dmabuf->phys +
3327 sizeof(MAILBOX_t));
3328 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3329 "3007 SLI_CONFIG(hbd)[%d], "
3330 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3331 index,
3332 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3333 &sli_cfg_mbx->un.
3334 sli_config_emb1_subsys.hbd[index]),
3335 sli_cfg_mbx->un.sli_config_emb1_subsys.
3336 hbd[index].pa_hi,
3337 sli_cfg_mbx->un.sli_config_emb1_subsys.
3338 hbd[index].pa_lo);
3339
3340 } else {
3341 sli_cfg_mbx->un.sli_config_emb1_subsys.
3342 hbd[index].pa_hi =
3343 putPaddrHigh(ext_dmabuf->phys);
3344 sli_cfg_mbx->un.sli_config_emb1_subsys.
3345 hbd[index].pa_lo =
3346 putPaddrLow(ext_dmabuf->phys);
3347 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3348 "3008 SLI_CONFIG(hbd)[%d], "
3349 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3350 index,
3351 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3352 &sli_cfg_mbx->un.
3353 sli_config_emb1_subsys.hbd[index]),
3354 sli_cfg_mbx->un.sli_config_emb1_subsys.
3355 hbd[index].pa_hi,
3356 sli_cfg_mbx->un.sli_config_emb1_subsys.
3357 hbd[index].pa_lo);
3358 }
3359 }
3360 return;
3361}
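The descriptor setup above splits each 64-bit DMA address with the driver's putPaddrHigh/putPaddrLow macros; a reduced sketch of that step (the struct example_bde is hypothetical and stands in for an mse/hbd entry):

struct example_bde {
	uint32_t pa_hi;
	uint32_t pa_lo;
};

static void example_fill_bde(struct example_bde *bde, dma_addr_t phys)
{
	bde->pa_hi = putPaddrHigh(phys);	/* upper 32 bits of the address */
	bde->pa_lo = putPaddrLow(phys);		/* lower 32 bits of the address */
}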
3362
3363/**
3364 * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
3365 * @phba: Pointer to HBA context object.
3366 * @job: Pointer to the bsg job object.
3367 * @nemb_tp: Enumerated non-embedded mailbox command type.
3368 * @dmabuf: Pointer to a DMA buffer descriptor.
3369 *
3370 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3371 * non-embedded external buffers.
3372 **/
3373static int
3374lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3375 enum nemb_type nemb_tp,
3376 struct lpfc_dmabuf *dmabuf)
3377{
3378 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3379 struct dfc_mbox_req *mbox_req;
3380 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3381 uint32_t ext_buf_cnt, ext_buf_index;
3382 struct lpfc_dmabuf *ext_dmabuf = NULL;
3383 struct bsg_job_data *dd_data = NULL;
3384 LPFC_MBOXQ_t *pmboxq = NULL;
3385 MAILBOX_t *pmb;
3386 uint8_t *pmbx;
3387 int rc, i;
3388
3389 mbox_req =
3390 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3391
3392 /* pointer to the start of mailbox command */
3393 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3394
3395 if (nemb_tp == nemb_mse) {
3396 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3397 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3398 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3399 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3400 "2945 Handled SLI_CONFIG(mse) rd, "
3401 "ext_buf_cnt(%d) out of range(%d)\n",
3402 ext_buf_cnt,
3403 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3404 rc = -ERANGE;
3405 goto job_error;
3406 }
3407 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3408 "2941 Handled SLI_CONFIG(mse) rd, "
3409 "ext_buf_cnt:%d\n", ext_buf_cnt);
3410 } else {
3411 /* sanity check on interface type for support */
3412 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3413 LPFC_SLI_INTF_IF_TYPE_2) {
3414 rc = -ENODEV;
3415 goto job_error;
3416 }
3417 /* nemb_tp == nemb_hbd */
3418 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3419 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3420 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3421 "2946 Handled SLI_CONFIG(hbd) rd, "
3422 "ext_buf_cnt(%d) out of range(%d)\n",
3423 ext_buf_cnt,
3424 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3425 rc = -ERANGE;
3426 goto job_error;
3427 }
3428 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3429 "2942 Handled SLI_CONFIG(hbd) rd, "
3430 "ext_buf_cnt:%d\n", ext_buf_cnt);
3431 }
3432
3433 	/* reject a non-embedded mailbox command with no external buffer */
3434 if (ext_buf_cnt == 0) {
3435 rc = -EPERM;
3436 goto job_error;
3437 } else if (ext_buf_cnt > 1) {
3438 /* additional external read buffers */
3439 for (i = 1; i < ext_buf_cnt; i++) {
3440 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3441 if (!ext_dmabuf) {
3442 rc = -ENOMEM;
3443 goto job_error;
3444 }
3445 list_add_tail(&ext_dmabuf->list,
3446 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3447 }
3448 }
3449
3450 /* bsg tracking structure */
3451 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3452 if (!dd_data) {
3453 rc = -ENOMEM;
3454 goto job_error;
3455 }
3456
3457 /* mailbox command structure for base driver */
3458 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3459 if (!pmboxq) {
3460 rc = -ENOMEM;
3461 goto job_error;
3462 }
3463 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3464
3465 /* for the first external buffer */
3466 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3467
3468 /* for the rest of external buffer descriptors if any */
3469 if (ext_buf_cnt > 1) {
3470 ext_buf_index = 1;
3471 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3472 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3473 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3474 ext_buf_index, dmabuf,
3475 curr_dmabuf);
3476 ext_buf_index++;
3477 }
3478 }
3479
3480 /* construct base driver mbox command */
3481 pmb = &pmboxq->u.mb;
3482 pmbx = (uint8_t *)dmabuf->virt;
3483 memcpy(pmb, pmbx, sizeof(*pmb));
3484 pmb->mbxOwner = OWN_HOST;
3485 pmboxq->vport = phba->pport;
3486
3487 /* multi-buffer handling context */
3488 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3489 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3490 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3491 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3492 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3493 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3494
3495 /* callback for multi-buffer read mailbox command */
3496 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3497
3498 /* context fields to callback function */
3499 pmboxq->context1 = dd_data;
3500 dd_data->type = TYPE_MBOX;
3501 dd_data->context_un.mbox.pmboxq = pmboxq;
3502 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3503 dd_data->context_un.mbox.set_job = job;
3504 job->dd_data = dd_data;
3505
3506 /* state change */
3507 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3508
3509 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3510 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3511 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3512 "2947 Issued SLI_CONFIG ext-buffer "
3513 "maibox command, rc:x%x\n", rc);
3514 return 1;
3515 }
3516 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3517 "2948 Failed to issue SLI_CONFIG ext-buffer "
3518 "maibox command, rc:x%x\n", rc);
3519 rc = -EPIPE;
3520
3521job_error:
3522 if (pmboxq)
3523 mempool_free(pmboxq, phba->mbox_mem_pool);
3524 lpfc_bsg_dma_page_list_free(phba,
3525 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3526 kfree(dd_data);
3527 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3528 return rc;
3529}
3530
3531/**
3532 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3533 * @phba: Pointer to HBA context object.
3534 * @job: Pointer to the bsg job object.
3535 * @dmabuf: Pointer to a DMA buffer descriptor.
3536 *
3537 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
3538 * non-embedded external buffers.
3539 **/
3540static int
3541lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3542 enum nemb_type nemb_tp,
3543 struct lpfc_dmabuf *dmabuf)
3544{
3545 struct dfc_mbox_req *mbox_req;
3546 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3547 uint32_t ext_buf_cnt;
3548 struct bsg_job_data *dd_data = NULL;
3549 LPFC_MBOXQ_t *pmboxq = NULL;
3550 MAILBOX_t *pmb;
3551 uint8_t *mbx;
3552 int rc = 0, i;
3553
3554 mbox_req =
3555 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3556
3557 /* pointer to the start of mailbox command */
3558 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3559
3560 if (nemb_tp == nemb_mse) {
3561 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3562 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3563 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3564 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3565 "2953 Handled SLI_CONFIG(mse) wr, "
3566 "ext_buf_cnt(%d) out of range(%d)\n",
3567 ext_buf_cnt,
3568 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3569 return -ERANGE;
3570 }
3571 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3572 "2949 Handled SLI_CONFIG(mse) wr, "
3573 "ext_buf_cnt:%d\n", ext_buf_cnt);
3574 } else {
3575 /* sanity check on interface type for support */
3576 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3577 LPFC_SLI_INTF_IF_TYPE_2)
3578 return -ENODEV;
3579 /* nemb_tp == nemb_hbd */
3580 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3581 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3582 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3583 "2954 Handled SLI_CONFIG(hbd) wr, "
3584 "ext_buf_cnt(%d) out of range(%d)\n",
3585 ext_buf_cnt,
3586 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3587 return -ERANGE;
3588 }
3589 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3590 "2950 Handled SLI_CONFIG(hbd) wr, "
3591 "ext_buf_cnt:%d\n", ext_buf_cnt);
3592 }
3593
3594 if (ext_buf_cnt == 0)
3595 return -EPERM;
3596
3597 /* for the first external buffer */
3598 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3599
3600 	/* log the remaining external buffer lengths for reference */
3601 for (i = 1; i < ext_buf_cnt; i++) {
3602 if (nemb_tp == nemb_mse)
3603 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3604 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
3605 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
3606 mse[i].buf_len);
3607 else
3608 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3609 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
3610 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3611 &sli_cfg_mbx->un.sli_config_emb1_subsys.
3612 hbd[i]));
3613 }
3614
3615 /* multi-buffer handling context */
3616 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3617 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
3618 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3619 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3620 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3621 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3622
3623 if (ext_buf_cnt == 1) {
3624 /* bsg tracking structure */
3625 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3626 if (!dd_data) {
3627 rc = -ENOMEM;
3628 goto job_error;
3629 }
3630
3631 /* mailbox command structure for base driver */
3632 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3633 if (!pmboxq) {
3634 rc = -ENOMEM;
3635 goto job_error;
3636 }
3637 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3638 pmb = &pmboxq->u.mb;
3639 mbx = (uint8_t *)dmabuf->virt;
3640 memcpy(pmb, mbx, sizeof(*pmb));
3641 pmb->mbxOwner = OWN_HOST;
3642 pmboxq->vport = phba->pport;
3643
3644 		/* callback for multi-buffer write mailbox command */
3645 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3646
3647 /* context fields to callback function */
3648 pmboxq->context1 = dd_data;
3649 dd_data->type = TYPE_MBOX;
3650 dd_data->context_un.mbox.pmboxq = pmboxq;
3651 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3652 dd_data->context_un.mbox.set_job = job;
3653 job->dd_data = dd_data;
3654
3655 /* state change */
3656 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3657
3658 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3659 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3660 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3661 "2955 Issued SLI_CONFIG ext-buffer "
3662 "maibox command, rc:x%x\n", rc);
3663 return 1;
3664 }
3665 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3666 "2956 Failed to issue SLI_CONFIG ext-buffer "
3667 "maibox command, rc:x%x\n", rc);
3668 rc = -EPIPE;
3669 }
3670
3671job_error:
3672 if (pmboxq)
3673 mempool_free(pmboxq, phba->mbox_mem_pool);
3674 kfree(dd_data);
3675
3676 return rc;
3677}
3678
3679/**
3680 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
3681 * @phba: Pointer to HBA context object.
3682 * @job: Pointer to the bsg job object.
3683 * @dmabuf: Pointer to a DMA buffer descriptor.
3684 *
3685 * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
3686 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
3687 * with embedded subsystem 0x1 and opcodes with external HBDs.
3688 **/
3689static int
3690lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3691 struct lpfc_dmabuf *dmabuf)
3692{
3693 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3694 uint32_t subsys;
3695 uint32_t opcode;
3696 int rc = SLI_CONFIG_NOT_HANDLED;
3697
3698 /* state change */
3699 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3700
3701 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3702
3703 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3704 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3705 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
3706 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3707 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
3708 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3709 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
3710 switch (opcode) {
3711 case FCOE_OPCODE_READ_FCF:
3712 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3713 "2957 Handled SLI_CONFIG "
3714 "subsys_fcoe, opcode:x%x\n",
3715 opcode);
3716 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3717 nemb_mse, dmabuf);
3718 break;
3719 case FCOE_OPCODE_ADD_FCF:
3720 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3721 "2958 Handled SLI_CONFIG "
3722 "subsys_fcoe, opcode:x%x\n",
3723 opcode);
3724 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3725 nemb_mse, dmabuf);
3726 break;
3727 default:
3728 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3729 "2959 Not handled SLI_CONFIG "
3730 "subsys_fcoe, opcode:x%x\n",
3731 opcode);
3732 rc = SLI_CONFIG_NOT_HANDLED;
3733 break;
3734 }
3735 } else {
3736 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3737 "2977 Handled SLI_CONFIG "
3738 "subsys:x%d, opcode:x%x\n",
3739 subsys, opcode);
3740 rc = SLI_CONFIG_NOT_HANDLED;
3741 }
3742 } else {
3743 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
3744 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3745 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
3746 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3747 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
3748 switch (opcode) {
3749 case COMN_OPCODE_READ_OBJECT:
3750 case COMN_OPCODE_READ_OBJECT_LIST:
3751 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3752 "2960 Handled SLI_CONFIG "
3753 "subsys_comn, opcode:x%x\n",
3754 opcode);
3755 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3756 nemb_hbd, dmabuf);
3757 break;
3758 case COMN_OPCODE_WRITE_OBJECT:
3759 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3760 "2961 Handled SLI_CONFIG "
3761 "subsys_comn, opcode:x%x\n",
3762 opcode);
3763 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3764 nemb_hbd, dmabuf);
3765 break;
3766 default:
3767 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3768 "2962 Not handled SLI_CONFIG "
3769 "subsys_comn, opcode:x%x\n",
3770 opcode);
3771 rc = SLI_CONFIG_NOT_HANDLED;
3772 break;
3773 }
3774 } else {
3775 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3776 "2978 Handled SLI_CONFIG "
3777 "subsys:x%d, opcode:x%x\n",
3778 subsys, opcode);
3779 rc = SLI_CONFIG_NOT_HANDLED;
3780 }
3781 }
3782 return rc;
3783}
3784
3785/**
3786 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
3787 * @phba: Pointer to HBA context object.
3788 *
3789 * This routine requests the abort of a pass-through mailbox command with
3790 * multiple external buffers due to an error condition.
3791 **/
3792static void
3793lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
3794{
3795 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
3796 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
3797 else
3798 lpfc_bsg_mbox_ext_session_reset(phba);
3799 return;
3800}
3801
3802/**
3803 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
3804 * @phba: Pointer to HBA context object.
3805 * @job: Pointer to the bsg job object.
3806 *
3807 * This routine extracts the next mailbox read external buffer back to
3808 * user space through BSG.
3809 **/
3810static int
3811lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
3812{
3813 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3814 struct lpfc_dmabuf *dmabuf;
3815 uint8_t *pbuf;
3816 uint32_t size;
3817 uint32_t index;
3818
3819 index = phba->mbox_ext_buf_ctx.seqNum;
3820 phba->mbox_ext_buf_ctx.seqNum++;
3821
3822 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3823 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3824
3825 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3826 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
3827 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
3828 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3829 "2963 SLI_CONFIG (mse) ext-buffer rd get "
3830 "buffer[%d], size:%d\n", index, size);
3831 } else {
3832 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3833 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
3834 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3835 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
3836 "buffer[%d], size:%d\n", index, size);
3837 }
3838 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
3839 return -EPIPE;
3840 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
3841 struct lpfc_dmabuf, list);
3842 list_del_init(&dmabuf->list);
3843 pbuf = (uint8_t *)dmabuf->virt;
3844 job->reply->reply_payload_rcv_len =
3845 sg_copy_from_buffer(job->reply_payload.sg_list,
3846 job->reply_payload.sg_cnt,
3847 pbuf, size);
3848
3849 lpfc_bsg_dma_page_free(phba, dmabuf);
3850
3851 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3852 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3853 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
3854 "command session done\n");
3855 lpfc_bsg_mbox_ext_session_reset(phba);
3856 }
3857
3858 job->reply->result = 0;
3859 job->job_done(job);
3860
3861 return SLI_CONFIG_HANDLED;
3862}
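A reduced sketch (hypothetical helper, not part of this patch) of the step above that hands a kernel buffer back to user space through the bsg reply scatter/gather list:

static void example_return_ebuf(struct fc_bsg_job *job, void *pbuf,
				uint32_t size)
{
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    pbuf, size);
	job->reply->result = 0;
	/* complete the job back to user space */
	job->job_done(job);
}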
3863
3864/**
3865 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
3866 * @phba: Pointer to HBA context object.
3867 * @dmabuf: Pointer to a DMA buffer descriptor.
3868 *
3869 * This routine sets up the next mailbox write external buffer obtained
3870 * from user space through BSG.
3871 **/
3872static int
3873lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3874 struct lpfc_dmabuf *dmabuf)
3875{
3876 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3877 struct bsg_job_data *dd_data = NULL;
3878 LPFC_MBOXQ_t *pmboxq = NULL;
3879 MAILBOX_t *pmb;
3880 enum nemb_type nemb_tp;
3881 uint8_t *pbuf;
3882 uint32_t size;
3883 uint32_t index;
3884 int rc;
3885
3886 index = phba->mbox_ext_buf_ctx.seqNum;
3887 phba->mbox_ext_buf_ctx.seqNum++;
3888 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
3889
3890 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3891 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3892
3893 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3894 if (!dd_data) {
3895 rc = -ENOMEM;
3896 goto job_error;
3897 }
3898
3899 pbuf = (uint8_t *)dmabuf->virt;
3900 size = job->request_payload.payload_len;
3901 sg_copy_to_buffer(job->request_payload.sg_list,
3902 job->request_payload.sg_cnt,
3903 pbuf, size);
3904
3905 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3906 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3907 "2966 SLI_CONFIG (mse) ext-buffer wr set "
3908 "buffer[%d], size:%d\n",
3909 phba->mbox_ext_buf_ctx.seqNum, size);
3910
3911 } else {
3912 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3913 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
3914 "buffer[%d], size:%d\n",
3915 phba->mbox_ext_buf_ctx.seqNum, size);
3916
3917 }
3918
3919 /* set up external buffer descriptor and add to external buffer list */
3920 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
3921 phba->mbox_ext_buf_ctx.mbx_dmabuf,
3922 dmabuf);
3923 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3924
3925 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3926 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3927 "2968 SLI_CONFIG ext-buffer wr all %d "
3928 "ebuffers received\n",
3929 phba->mbox_ext_buf_ctx.numBuf);
3930 /* mailbox command structure for base driver */
3931 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3932 if (!pmboxq) {
3933 rc = -ENOMEM;
3934 goto job_error;
3935 }
3936 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3937 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3938 pmb = &pmboxq->u.mb;
3939 memcpy(pmb, pbuf, sizeof(*pmb));
3940 pmb->mbxOwner = OWN_HOST;
3941 pmboxq->vport = phba->pport;
3942
3943 /* callback for multi-buffer write mailbox command */
3944 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3945
3946 /* context fields to callback function */
3947 pmboxq->context1 = dd_data;
3948 dd_data->type = TYPE_MBOX;
3949 dd_data->context_un.mbox.pmboxq = pmboxq;
3950 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
3951 dd_data->context_un.mbox.set_job = job;
3952 job->dd_data = dd_data;
3953
3954 /* state change */
3955 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3956
3957 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3958 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3959 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3960 "2969 Issued SLI_CONFIG ext-buffer "
3961 "maibox command, rc:x%x\n", rc);
3962 return 1;
3963 }
3964 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3965 "2970 Failed to issue SLI_CONFIG ext-buffer "
3966 "maibox command, rc:x%x\n", rc);
3967 rc = -EPIPE;
3968 goto job_error;
3969 }
3970
3971 /* wait for additional external buffers */
3972 job->reply->result = 0;
3973 job->job_done(job);
3974 return SLI_CONFIG_HANDLED;
3975
3976job_error:
3977 lpfc_bsg_dma_page_free(phba, dmabuf);
3978 kfree(dd_data);
3979
3980 return rc;
3981}
3982
3983/**
3984 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
3985 * @phba: Pointer to HBA context object.
3986 * @job: Pointer to a BSG job object.
3987 * @dmabuf: Pointer to a DMA buffer descriptor.
3988 *
3989 * This routine handles an external buffer for a non-embedded multi-buffer
3990 * SLI_CONFIG (0x9B) mailbox command.
3991 **/
3992static int
3993lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
3994 struct lpfc_dmabuf *dmabuf)
3995{
3996 int rc;
3997
3998 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3999 "2971 SLI_CONFIG buffer (type:x%x)\n",
4000 phba->mbox_ext_buf_ctx.mboxType);
4001
4002 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4003 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4004 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4005 "2972 SLI_CONFIG rd buffer state "
4006 "mismatch:x%x\n",
4007 phba->mbox_ext_buf_ctx.state);
4008 lpfc_bsg_mbox_ext_abort(phba);
4009 return -EPIPE;
4010 }
4011 rc = lpfc_bsg_read_ebuf_get(phba, job);
4012 if (rc == SLI_CONFIG_HANDLED)
4013 lpfc_bsg_dma_page_free(phba, dmabuf);
4014 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4015 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4016 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4017 "2973 SLI_CONFIG wr buffer state "
4018 "mismatch:x%x\n",
4019 phba->mbox_ext_buf_ctx.state);
4020 lpfc_bsg_mbox_ext_abort(phba);
4021 return -EPIPE;
4022 }
4023 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4024 }
4025 return rc;
4026}
4027
4028/**
4029 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4030 * @phba: Pointer to HBA context object.
4031 * @job: Pointer to a BSG job object.
4032 * @dmabuf: Pointer to a DMA buffer descriptor.
4033 *
4034 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4035 * (0x9B) mailbox commands and external buffers.
4036 **/
4037static int
4038lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4039 struct lpfc_dmabuf *dmabuf)
4040{
4041 struct dfc_mbox_req *mbox_req;
4042 int rc;
4043
4044 mbox_req =
4045 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4046
4047 /* mbox command with/without single external buffer */
4048 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4049 return SLI_CONFIG_NOT_HANDLED;
4050
4051 /* mbox command and first external buffer */
4052 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4053 if (mbox_req->extSeqNum == 1) {
4054 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4055 "2974 SLI_CONFIG mailbox: tag:%d, "
4056 "seq:%d\n", mbox_req->extMboxTag,
4057 mbox_req->extSeqNum);
4058 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4059 return rc;
4060 } else
4061 goto sli_cfg_ext_error;
4062 }
4063
4064 /*
4065 * handle additional external buffers
4066 */
4067
4068 /* check broken pipe conditions */
4069 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4070 goto sli_cfg_ext_error;
4071 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4072 goto sli_cfg_ext_error;
4073 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4074 goto sli_cfg_ext_error;
4075
4076 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4077 "2975 SLI_CONFIG mailbox external buffer: "
4078 "extSta:x%x, tag:%d, seq:%d\n",
4079 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4080 mbox_req->extSeqNum);
4081 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4082 return rc;
4083
4084sli_cfg_ext_error:
4085 /* all other cases, broken pipe */
4086 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4087 "2976 SLI_CONFIG mailbox broken pipe: "
4088 "ctxSta:x%x, ctxNumBuf:%d "
4089 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4090 phba->mbox_ext_buf_ctx.state,
4091 phba->mbox_ext_buf_ctx.numBuf,
4092 phba->mbox_ext_buf_ctx.mbxTag,
4093 phba->mbox_ext_buf_ctx.seqNum,
4094 mbox_req->extMboxTag, mbox_req->extSeqNum);
4095
4096 lpfc_bsg_mbox_ext_session_reset(phba);
4097
4098 return -EPIPE;
4099}
4100
4101/**
2622 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app 4102 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2623 * @phba: Pointer to HBA context object. 4103 * @phba: Pointer to HBA context object.
2624 * @mb: Pointer to a mailbox object. 4104 * @mb: Pointer to a mailbox object.
@@ -2638,22 +4118,21 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2638 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ 4118 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
2639 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ 4119 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
2640 /* a 4k buffer to hold the mb and extended data from/to the bsg */ 4120 /* a 4k buffer to hold the mb and extended data from/to the bsg */
2641 MAILBOX_t *mb = NULL; 4121 uint8_t *pmbx = NULL;
2642 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ 4122 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
2643 uint32_t size; 4123 struct lpfc_dmabuf *dmabuf = NULL;
2644 struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */ 4124 struct dfc_mbox_req *mbox_req;
2645 struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
2646 struct ulp_bde64 *rxbpl = NULL;
2647 struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
2648 job->request->rqst_data.h_vendor.vendor_cmd;
2649 struct READ_EVENT_LOG_VAR *rdEventLog; 4125 struct READ_EVENT_LOG_VAR *rdEventLog;
2650 uint32_t transmit_length, receive_length, mode; 4126 uint32_t transmit_length, receive_length, mode;
4127 struct lpfc_mbx_sli4_config *sli4_config;
2651 struct lpfc_mbx_nembed_cmd *nembed_sge; 4128 struct lpfc_mbx_nembed_cmd *nembed_sge;
2652 struct mbox_header *header; 4129 struct mbox_header *header;
2653 struct ulp_bde64 *bde; 4130 struct ulp_bde64 *bde;
2654 uint8_t *ext = NULL; 4131 uint8_t *ext = NULL;
2655 int rc = 0; 4132 int rc = 0;
2656 uint8_t *from; 4133 uint8_t *from;
4134 uint32_t size;
4135
2657 4136
2658 /* in case no data is transferred */ 4137 /* in case no data is transferred */
2659 job->reply->reply_payload_rcv_len = 0; 4138 job->reply->reply_payload_rcv_len = 0;
@@ -2665,6 +4144,18 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2665 goto job_done; 4144 goto job_done;
2666 } 4145 }
2667 4146
4147 /*
4148 * Don't allow mailbox commands to be sent when blocked or when in
4149 * the middle of discovery
4150 */
4151 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4152 rc = -EAGAIN;
4153 goto job_done;
4154 }
4155
4156 mbox_req =
4157 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4158
2668 /* check if requested extended data lengths are valid */ 4159 /* check if requested extended data lengths are valid */
2669 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || 4160 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
2670 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) { 4161 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
@@ -2672,6 +4163,32 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2672 goto job_done; 4163 goto job_done;
2673 } 4164 }
2674 4165
4166 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4167 if (!dmabuf || !dmabuf->virt) {
4168 rc = -ENOMEM;
4169 goto job_done;
4170 }
4171
4172 /* Get the mailbox command or external buffer from BSG */
4173 pmbx = (uint8_t *)dmabuf->virt;
4174 size = job->request_payload.payload_len;
4175 sg_copy_to_buffer(job->request_payload.sg_list,
4176 job->request_payload.sg_cnt, pmbx, size);
4177
4178 /* Handle possible SLI_CONFIG with non-embedded payloads */
4179 if (phba->sli_rev == LPFC_SLI_REV4) {
4180 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4181 if (rc == SLI_CONFIG_HANDLED)
4182 goto job_cont;
4183 if (rc)
4184 goto job_done;
4185 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4186 }
4187
4188 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4189 if (rc != 0)
4190 goto job_done; /* must be negative */
4191
2675 /* allocate our bsg tracking structure */ 4192 /* allocate our bsg tracking structure */
2676 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 4193 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2677 if (!dd_data) { 4194 if (!dd_data) {
@@ -2681,12 +4198,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2681 goto job_done; 4198 goto job_done;
2682 } 4199 }
2683 4200
2684 mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
2685 if (!mb) {
2686 rc = -ENOMEM;
2687 goto job_done;
2688 }
2689
2690 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4201 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2691 if (!pmboxq) { 4202 if (!pmboxq) {
2692 rc = -ENOMEM; 4203 rc = -ENOMEM;
@@ -2694,17 +4205,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2694 } 4205 }
2695 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 4206 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2696 4207
2697 size = job->request_payload.payload_len;
2698 sg_copy_to_buffer(job->request_payload.sg_list,
2699 job->request_payload.sg_cnt,
2700 mb, size);
2701
2702 rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2703 if (rc != 0)
2704 goto job_done; /* must be negative */
2705
2706 pmb = &pmboxq->u.mb; 4208 pmb = &pmboxq->u.mb;
2707 memcpy(pmb, mb, sizeof(*pmb)); 4209 memcpy(pmb, pmbx, sizeof(*pmb));
2708 pmb->mbxOwner = OWN_HOST; 4210 pmb->mbxOwner = OWN_HOST;
2709 pmboxq->vport = vport; 4211 pmboxq->vport = vport;
2710 4212
@@ -2721,30 +4223,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2721 "0x%x while in stopped state.\n", 4223 "0x%x while in stopped state.\n",
2722 pmb->mbxCommand); 4224 pmb->mbxCommand);
2723 4225
2724 /* Don't allow mailbox commands to be sent when blocked
2725 * or when in the middle of discovery
2726 */
2727 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2728 rc = -EAGAIN;
2729 goto job_done;
2730 }
2731
2732 /* extended mailbox commands will need an extended buffer */ 4226 /* extended mailbox commands will need an extended buffer */
2733 if (mbox_req->inExtWLen || mbox_req->outExtWLen) { 4227 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
2734 ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
2735 if (!ext) {
2736 rc = -ENOMEM;
2737 goto job_done;
2738 }
2739
2740 /* any data for the device? */ 4228 /* any data for the device? */
2741 if (mbox_req->inExtWLen) { 4229 if (mbox_req->inExtWLen) {
2742 from = (uint8_t *)mb; 4230 from = pmbx;
2743 from += sizeof(MAILBOX_t); 4231 ext = from + sizeof(MAILBOX_t);
2744 memcpy((uint8_t *)ext, from,
2745 mbox_req->inExtWLen * sizeof(uint32_t));
2746 } 4232 }
2747
2748 pmboxq->context2 = ext; 4233 pmboxq->context2 = ext;
2749 pmboxq->in_ext_byte_len = 4234 pmboxq->in_ext_byte_len =
2750 mbox_req->inExtWLen * sizeof(uint32_t); 4235 mbox_req->inExtWLen * sizeof(uint32_t);
@@ -2768,46 +4253,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2768 rc = -ERANGE; 4253 rc = -ERANGE;
2769 goto job_done; 4254 goto job_done;
2770 } 4255 }
2771
2772 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2773 if (!rxbmp) {
2774 rc = -ENOMEM;
2775 goto job_done;
2776 }
2777
2778 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2779 if (!rxbmp->virt) {
2780 rc = -ENOMEM;
2781 goto job_done;
2782 }
2783
2784 INIT_LIST_HEAD(&rxbmp->list);
2785 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2786 dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
2787 if (!dmp) {
2788 rc = -ENOMEM;
2789 goto job_done;
2790 }
2791
2792 INIT_LIST_HEAD(&dmp->dma.list);
2793 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = 4256 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
2794 putPaddrHigh(dmp->dma.phys); 4257 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
2795 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = 4258 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
2796 putPaddrLow(dmp->dma.phys); 4259 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
2797 4260
2798 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = 4261 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
2799 putPaddrHigh(dmp->dma.phys + 4262 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
2800 pmb->un.varBIUdiag.un.s2. 4263 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
2801 xmit_bde64.tus.f.bdeSize);
2802 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = 4264 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
2803 putPaddrLow(dmp->dma.phys + 4265 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
2804 pmb->un.varBIUdiag.un.s2. 4266 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
2805 xmit_bde64.tus.f.bdeSize);
2806
2807 /* copy the transmit data found in the mailbox extension area */
2808 from = (uint8_t *)mb;
2809 from += sizeof(MAILBOX_t);
2810 memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
2811 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { 4267 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
2812 rdEventLog = &pmb->un.varRdEventLog; 4268 rdEventLog = &pmb->un.varRdEventLog;
2813 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; 4269 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
@@ -2823,33 +4279,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2823 4279
2824 /* mode zero uses a bde like biu diags command */ 4280 /* mode zero uses a bde like biu diags command */
2825 if (mode == 0) { 4281 if (mode == 0) {
2826 4282 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
2827 /* rebuild the command for sli4 using our own buffers 4283 + sizeof(MAILBOX_t));
2828 * like we do for biu diags 4284 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
2829 */ 4285 + sizeof(MAILBOX_t));
2830
2831 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2832 if (!rxbmp) {
2833 rc = -ENOMEM;
2834 goto job_done;
2835 }
2836
2837 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2838 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2839 if (rxbpl) {
2840 INIT_LIST_HEAD(&rxbmp->list);
2841 dmp = diag_cmd_data_alloc(phba, rxbpl,
2842 receive_length, 0);
2843 }
2844
2845 if (!dmp) {
2846 rc = -ENOMEM;
2847 goto job_done;
2848 }
2849
2850 INIT_LIST_HEAD(&dmp->dma.list);
2851 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2852 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2853 } 4286 }
2854 } else if (phba->sli_rev == LPFC_SLI_REV4) { 4287 } else if (phba->sli_rev == LPFC_SLI_REV4) {
2855 if (pmb->mbxCommand == MBX_DUMP_MEMORY) { 4288 if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
@@ -2860,36 +4293,14 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2860 /* receive length cannot be greater than mailbox 4293 /* receive length cannot be greater than mailbox
2861 * extension size 4294 * extension size
2862 */ 4295 */
2863 if ((receive_length == 0) || 4296 if (receive_length == 0) {
2864 (receive_length > MAILBOX_EXT_SIZE)) {
2865 rc = -ERANGE; 4297 rc = -ERANGE;
2866 goto job_done; 4298 goto job_done;
2867 } 4299 }
2868 4300 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
2869 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4301 + sizeof(MAILBOX_t));
2870 if (!rxbmp) { 4302 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
2871 rc = -ENOMEM; 4303 + sizeof(MAILBOX_t));
2872 goto job_done;
2873 }
2874
2875 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2876 if (!rxbmp->virt) {
2877 rc = -ENOMEM;
2878 goto job_done;
2879 }
2880
2881 INIT_LIST_HEAD(&rxbmp->list);
2882 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2883 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
2884 0);
2885 if (!dmp) {
2886 rc = -ENOMEM;
2887 goto job_done;
2888 }
2889
2890 INIT_LIST_HEAD(&dmp->dma.list);
2891 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2892 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2893 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && 4304 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
2894 pmb->un.varUpdateCfg.co) { 4305 pmb->un.varUpdateCfg.co) {
2895 bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; 4306 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
@@ -2899,102 +4310,53 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2899 rc = -ERANGE; 4310 rc = -ERANGE;
2900 goto job_done; 4311 goto job_done;
2901 } 4312 }
2902 4313 bde->addrHigh = putPaddrHigh(dmabuf->phys
2903 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4314 + sizeof(MAILBOX_t));
2904 if (!rxbmp) { 4315 bde->addrLow = putPaddrLow(dmabuf->phys
2905 rc = -ENOMEM; 4316 + sizeof(MAILBOX_t));
2906 goto job_done;
2907 }
2908
2909 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2910 if (!rxbmp->virt) {
2911 rc = -ENOMEM;
2912 goto job_done;
2913 }
2914
2915 INIT_LIST_HEAD(&rxbmp->list);
2916 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2917 dmp = diag_cmd_data_alloc(phba, rxbpl,
2918 bde->tus.f.bdeSize, 0);
2919 if (!dmp) {
2920 rc = -ENOMEM;
2921 goto job_done;
2922 }
2923
2924 INIT_LIST_HEAD(&dmp->dma.list);
2925 bde->addrHigh = putPaddrHigh(dmp->dma.phys);
2926 bde->addrLow = putPaddrLow(dmp->dma.phys);
2927
2928 /* copy the transmit data found in the mailbox
2929 * extension area
2930 */
2931 from = (uint8_t *)mb;
2932 from += sizeof(MAILBOX_t);
2933 memcpy((uint8_t *)dmp->dma.virt, from,
2934 bde->tus.f.bdeSize);
2935 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { 4317 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
2936 /* rebuild the command for sli4 using our own buffers 4318 /* Handling non-embedded SLI_CONFIG mailbox command */
2937 * like we do for biu diags 4319 sli4_config = &pmboxq->u.mqe.un.sli4_config;
2938 */ 4320 if (!bf_get(lpfc_mbox_hdr_emb,
2939 header = (struct mbox_header *)&pmb->un.varWords[0]; 4321 &sli4_config->header.cfg_mhdr)) {
2940 nembed_sge = (struct lpfc_mbx_nembed_cmd *) 4322 /* rebuild the command for sli4 using our
2941 &pmb->un.varWords[0]; 4323 * own buffers like we do for biu diags
2942 receive_length = nembed_sge->sge[0].length; 4324 */
2943 4325 header = (struct mbox_header *)
2944 /* receive length cannot be greater than mailbox 4326 &pmb->un.varWords[0];
2945 * extension size 4327 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
2946 */ 4328 &pmb->un.varWords[0];
2947 if ((receive_length == 0) || 4329 receive_length = nembed_sge->sge[0].length;
2948 (receive_length > MAILBOX_EXT_SIZE)) { 4330
2949 rc = -ERANGE; 4331 /* receive length cannot be greater than
2950 goto job_done; 4332 * mailbox extension size
2951 } 4333 */
2952 4334 if ((receive_length == 0) ||
2953 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4335 (receive_length > MAILBOX_EXT_SIZE)) {
2954 if (!rxbmp) { 4336 rc = -ERANGE;
2955 rc = -ENOMEM; 4337 goto job_done;
2956 goto job_done; 4338 }
2957 }
2958
2959 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2960 if (!rxbmp->virt) {
2961 rc = -ENOMEM;
2962 goto job_done;
2963 }
2964 4339
2965 INIT_LIST_HEAD(&rxbmp->list); 4340 nembed_sge->sge[0].pa_hi =
2966 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 4341 putPaddrHigh(dmabuf->phys
2967 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length, 4342 + sizeof(MAILBOX_t));
2968 0); 4343 nembed_sge->sge[0].pa_lo =
2969 if (!dmp) { 4344 putPaddrLow(dmabuf->phys
2970 rc = -ENOMEM; 4345 + sizeof(MAILBOX_t));
2971 goto job_done;
2972 } 4346 }
2973
2974 INIT_LIST_HEAD(&dmp->dma.list);
2975 nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
2976 nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
2977 /* copy the transmit data found in the mailbox
2978 * extension area
2979 */
2980 from = (uint8_t *)mb;
2981 from += sizeof(MAILBOX_t);
2982 memcpy((uint8_t *)dmp->dma.virt, from,
2983 header->cfg_mhdr.payload_length);
2984 } 4347 }
2985 } 4348 }
2986 4349
2987 dd_data->context_un.mbox.rxbmp = rxbmp; 4350 dd_data->context_un.mbox.dmabuffers = dmabuf;
2988 dd_data->context_un.mbox.dmp = dmp;
2989 4351
2990 /* setup wake call as IOCB callback */ 4352 /* setup wake call as IOCB callback */
2991 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait; 4353 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
2992 4354
2993 /* setup context field to pass wait_queue pointer to wake function */ 4355 /* setup context field to pass wait_queue pointer to wake function */
2994 pmboxq->context1 = dd_data; 4356 pmboxq->context1 = dd_data;
2995 dd_data->type = TYPE_MBOX; 4357 dd_data->type = TYPE_MBOX;
2996 dd_data->context_un.mbox.pmboxq = pmboxq; 4358 dd_data->context_un.mbox.pmboxq = pmboxq;
2997 dd_data->context_un.mbox.mb = mb; 4359 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
2998 dd_data->context_un.mbox.set_job = job; 4360 dd_data->context_un.mbox.set_job = job;
2999 dd_data->context_un.mbox.ext = ext; 4361 dd_data->context_un.mbox.ext = ext;
3000 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 4362 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
@@ -3011,11 +4373,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3011 } 4373 }
3012 4374
3013 /* job finished, copy the data */ 4375 /* job finished, copy the data */
3014 memcpy(mb, pmb, sizeof(*pmb)); 4376 memcpy(pmbx, pmb, sizeof(*pmb));
3015 job->reply->reply_payload_rcv_len = 4377 job->reply->reply_payload_rcv_len =
3016 sg_copy_from_buffer(job->reply_payload.sg_list, 4378 sg_copy_from_buffer(job->reply_payload.sg_list,
3017 job->reply_payload.sg_cnt, 4379 job->reply_payload.sg_cnt,
3018 mb, size); 4380 pmbx, size);
3019 /* not waiting mbox already done */ 4381 /* not waiting mbox already done */
3020 rc = 0; 4382 rc = 0;
3021 goto job_done; 4383 goto job_done;
@@ -3027,22 +4389,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3027 4389
3028job_done: 4390job_done:
3029 /* common exit for error or job completed inline */ 4391 /* common exit for error or job completed inline */
3030 kfree(mb);
3031 if (pmboxq) 4392 if (pmboxq)
3032 mempool_free(pmboxq, phba->mbox_mem_pool); 4393 mempool_free(pmboxq, phba->mbox_mem_pool);
3033 kfree(ext); 4394 lpfc_bsg_dma_page_free(phba, dmabuf);
3034 if (dmp) {
3035 dma_free_coherent(&phba->pcidev->dev,
3036 dmp->size, dmp->dma.virt,
3037 dmp->dma.phys);
3038 kfree(dmp);
3039 }
3040 if (rxbmp) {
3041 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
3042 kfree(rxbmp);
3043 }
3044 kfree(dd_data); 4395 kfree(dd_data);
3045 4396
4397job_cont:
3046 return rc; 4398 return rc;
3047} 4399}
3048 4400
@@ -3055,37 +4407,28 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
3055{ 4407{
3056 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 4408 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3057 struct lpfc_hba *phba = vport->phba; 4409 struct lpfc_hba *phba = vport->phba;
4410 struct dfc_mbox_req *mbox_req;
3058 int rc = 0; 4411 int rc = 0;
3059 4412
3060 /* in case no data is transferred */ 4413 /* mix-and-match backward compatibility */
3061 job->reply->reply_payload_rcv_len = 0; 4414 job->reply->reply_payload_rcv_len = 0;
3062 if (job->request_len < 4415 if (job->request_len <
3063 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { 4416 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
3064 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4417 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3065 "2737 Received MBOX_REQ request below " 4418 "2737 Mix-and-match backward compability "
3066 "minimum size\n"); 4419 "between MBOX_REQ old size:%d and "
3067 rc = -EINVAL; 4420 "new request size:%d\n",
3068 goto job_error; 4421 (int)(job->request_len -
3069 } 4422 sizeof(struct fc_bsg_request)),
3070 4423 (int)sizeof(struct dfc_mbox_req));
3071 if (job->request_payload.payload_len != BSG_MBOX_SIZE) { 4424 mbox_req = (struct dfc_mbox_req *)
3072 rc = -EINVAL; 4425 job->request->rqst_data.h_vendor.vendor_cmd;
3073 goto job_error; 4426 mbox_req->extMboxTag = 0;
3074 } 4427 mbox_req->extSeqNum = 0;
3075
3076 if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
3077 rc = -EINVAL;
3078 goto job_error;
3079 }
3080
3081 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
3082 rc = -EAGAIN;
3083 goto job_error;
3084 } 4428 }
3085 4429
3086 rc = lpfc_bsg_issue_mbox(phba, job, vport); 4430 rc = lpfc_bsg_issue_mbox(phba, job, vport);
3087 4431
3088job_error:
3089 if (rc == 0) { 4432 if (rc == 0) {
3090 /* job done */ 4433 /* job done */
3091 job->reply->result = 0; 4434 job->reply->result = 0;
@@ -3416,10 +4759,16 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
3416 rc = lpfc_bsg_send_mgmt_rsp(job); 4759 rc = lpfc_bsg_send_mgmt_rsp(job);
3417 break; 4760 break;
3418 case LPFC_BSG_VENDOR_DIAG_MODE: 4761 case LPFC_BSG_VENDOR_DIAG_MODE:
3419 rc = lpfc_bsg_diag_mode(job); 4762 rc = lpfc_bsg_diag_loopback_mode(job);
4763 break;
4764 case LPFC_BSG_VENDOR_DIAG_MODE_END:
4765 rc = lpfc_sli4_bsg_diag_mode_end(job);
4766 break;
4767 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
4768 rc = lpfc_bsg_diag_loopback_run(job);
3420 break; 4769 break;
3421 case LPFC_BSG_VENDOR_DIAG_TEST: 4770 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
3422 rc = lpfc_bsg_diag_test(job); 4771 rc = lpfc_sli4_bsg_link_diag_test(job);
3423 break; 4772 break;
3424 case LPFC_BSG_VENDOR_GET_MGMT_REV: 4773 case LPFC_BSG_VENDOR_GET_MGMT_REV:
3425 rc = lpfc_bsg_get_dfc_rev(job); 4774 rc = lpfc_bsg_get_dfc_rev(job);
@@ -3538,6 +4887,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
3538 /* the mbox completion handler can now be run */ 4887 /* the mbox completion handler can now be run */
3539 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 4888 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3540 job->job_done(job); 4889 job->job_done(job);
4890 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4891 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
3541 break; 4892 break;
3542 case TYPE_MENLO: 4893 case TYPE_MENLO:
3543 menlo = &dd_data->context_un.menlo; 4894 menlo = &dd_data->context_un.menlo;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index b542aca6f5ae..c8c2b47ea886 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -24,15 +24,17 @@
24 * These are the vendor unique structures passed in using the bsg 24 * These are the vendor unique structures passed in using the bsg
25 * FC_BSG_HST_VENDOR message code type. 25 * FC_BSG_HST_VENDOR message code type.
26 */ 26 */
27#define LPFC_BSG_VENDOR_SET_CT_EVENT 1 27#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
28#define LPFC_BSG_VENDOR_GET_CT_EVENT 2 28#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
29#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3 29#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
30#define LPFC_BSG_VENDOR_DIAG_MODE 4 30#define LPFC_BSG_VENDOR_DIAG_MODE 4
31#define LPFC_BSG_VENDOR_DIAG_TEST 5 31#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
32#define LPFC_BSG_VENDOR_GET_MGMT_REV 6 32#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
33#define LPFC_BSG_VENDOR_MBOX 7 33#define LPFC_BSG_VENDOR_MBOX 7
34#define LPFC_BSG_VENDOR_MENLO_CMD 8 34#define LPFC_BSG_VENDOR_MENLO_CMD 8
35#define LPFC_BSG_VENDOR_MENLO_DATA 9 35#define LPFC_BSG_VENDOR_MENLO_DATA 9
36#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
37#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
36 38
37struct set_ct_event { 39struct set_ct_event {
38 uint32_t command; 40 uint32_t command;
@@ -67,10 +69,25 @@ struct diag_mode_set {
67 uint32_t timeout; 69 uint32_t timeout;
68}; 70};
69 71
72struct sli4_link_diag {
73 uint32_t command;
74 uint32_t timeout;
75 uint32_t test_id;
76 uint32_t loops;
77 uint32_t test_version;
78 uint32_t error_action;
79};
80
70struct diag_mode_test { 81struct diag_mode_test {
71 uint32_t command; 82 uint32_t command;
72}; 83};
73 84
85struct diag_status {
86 uint32_t mbox_status;
87 uint32_t shdr_status;
88 uint32_t shdr_add_status;
89};
90
74#define LPFC_WWNN_TYPE 0 91#define LPFC_WWNN_TYPE 0
75#define LPFC_WWPN_TYPE 1 92#define LPFC_WWPN_TYPE 1
76 93
@@ -92,11 +109,15 @@ struct get_mgmt_rev_reply {
92}; 109};
93 110
94#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */ 111#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
112
113/* BSG mailbox request header */
95struct dfc_mbox_req { 114struct dfc_mbox_req {
96 uint32_t command; 115 uint32_t command;
97 uint32_t mbOffset; 116 uint32_t mbOffset;
98 uint32_t inExtWLen; 117 uint32_t inExtWLen;
99 uint32_t outExtWLen; 118 uint32_t outExtWLen;
119 uint32_t extMboxTag;
120 uint32_t extSeqNum;
100}; 121};
101 122
102/* Used for menlo command or menlo data. The xri is only used for menlo data */ 123/* Used for menlo command or menlo data. The xri is only used for menlo data */
@@ -171,7 +192,7 @@ struct lpfc_sli_config_mse {
171#define lpfc_mbox_sli_config_mse_len_WORD buf_len 192#define lpfc_mbox_sli_config_mse_len_WORD buf_len
172}; 193};
173 194
174struct lpfc_sli_config_subcmd_hbd { 195struct lpfc_sli_config_hbd {
175 uint32_t buf_len; 196 uint32_t buf_len;
176#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0 197#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
177#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff 198#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
@@ -194,21 +215,39 @@ struct lpfc_sli_config_hdr {
194 uint32_t reserved5; 215 uint32_t reserved5;
195}; 216};
196 217
197struct lpfc_sli_config_generic { 218struct lpfc_sli_config_emb0_subsys {
198 struct lpfc_sli_config_hdr sli_config_hdr; 219 struct lpfc_sli_config_hdr sli_config_hdr;
199#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19 220#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
200 struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE]; 221 struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
222 uint32_t padding;
223 uint32_t word64;
224#define lpfc_emb0_subcmnd_opcode_SHIFT 0
225#define lpfc_emb0_subcmnd_opcode_MASK 0xff
226#define lpfc_emb0_subcmnd_opcode_WORD word64
227#define lpfc_emb0_subcmnd_subsys_SHIFT 8
228#define lpfc_emb0_subcmnd_subsys_MASK 0xff
229#define lpfc_emb0_subcmnd_subsys_WORD word64
230/* Subsystem FCOE (0x0C) OpCodes */
231#define SLI_CONFIG_SUBSYS_FCOE 0x0C
232#define FCOE_OPCODE_READ_FCF 0x08
233#define FCOE_OPCODE_ADD_FCF 0x09
201}; 234};
202 235
203struct lpfc_sli_config_subcmnd { 236struct lpfc_sli_config_emb1_subsys {
204 struct lpfc_sli_config_hdr sli_config_hdr; 237 struct lpfc_sli_config_hdr sli_config_hdr;
205 uint32_t word6; 238 uint32_t word6;
206#define lpfc_subcmnd_opcode_SHIFT 0 239#define lpfc_emb1_subcmnd_opcode_SHIFT 0
207#define lpfc_subcmnd_opcode_MASK 0xff 240#define lpfc_emb1_subcmnd_opcode_MASK 0xff
208#define lpfc_subcmnd_opcode_WORD word6 241#define lpfc_emb1_subcmnd_opcode_WORD word6
209#define lpfc_subcmnd_subsys_SHIFT 8 242#define lpfc_emb1_subcmnd_subsys_SHIFT 8
210#define lpfc_subcmnd_subsys_MASK 0xff 243#define lpfc_emb1_subcmnd_subsys_MASK 0xff
211#define lpfc_subcmnd_subsys_WORD word6 244#define lpfc_emb1_subcmnd_subsys_WORD word6
245/* Subsystem COMN (0x01) OpCodes */
246#define SLI_CONFIG_SUBSYS_COMN 0x01
247#define COMN_OPCODE_READ_OBJECT 0xAB
248#define COMN_OPCODE_WRITE_OBJECT 0xAC
249#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
250#define COMN_OPCODE_DELETE_OBJECT 0xAE
212 uint32_t timeout; 251 uint32_t timeout;
213 uint32_t request_length; 252 uint32_t request_length;
214 uint32_t word9; 253 uint32_t word9;
@@ -222,8 +261,8 @@ struct lpfc_sli_config_subcmnd {
222 uint32_t rd_offset; 261 uint32_t rd_offset;
223 uint32_t obj_name[26]; 262 uint32_t obj_name[26];
224 uint32_t hbd_count; 263 uint32_t hbd_count;
225#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10 264#define LPFC_MBX_SLI_CONFIG_MAX_HBD 8
226 struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD]; 265 struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
227}; 266};
228 267
229struct lpfc_sli_config_mbox { 268struct lpfc_sli_config_mbox {
@@ -235,7 +274,11 @@ struct lpfc_sli_config_mbox {
235#define lpfc_mqe_command_MASK 0x000000FF 274#define lpfc_mqe_command_MASK 0x000000FF
236#define lpfc_mqe_command_WORD word0 275#define lpfc_mqe_command_WORD word0
237 union { 276 union {
238 struct lpfc_sli_config_generic sli_config_generic; 277 struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
239 struct lpfc_sli_config_subcmnd sli_config_subcmnd; 278 struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
240 } un; 279 } un;
241}; 280};
281
282/* driver only */
283#define SLI_CONFIG_NOT_HANDLED 0
284#define SLI_CONFIG_HANDLED 1
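
The emb0/emb1 words added here follow the driver's usual name_SHIFT/name_MASK/name_WORD convention, so they can be read with the same bf_get() accessor already used in lpfc_bsg.c above (see the lpfc_mbox_hdr_emb test). The sketch below is only a user-space approximation of that convention: GET_FIELD is a hypothetical stand-in for bf_get(), and the struct is trimmed to the single word carrying the sub-command opcode and subsystem.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the driver's bf_get(): each field supplies
 * matching name_SHIFT, name_MASK and name_WORD definitions.
 */
#define GET_FIELD(name, ptr) \
	((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

/* Trimmed view of struct lpfc_sli_config_emb0_subsys: only word64. */
struct emb0_tail {
	uint32_t word64;
};
#define subcmnd_opcode_SHIFT	0
#define subcmnd_opcode_MASK	0xff
#define subcmnd_opcode_WORD	word64
#define subcmnd_subsys_SHIFT	8
#define subcmnd_subsys_MASK	0xff
#define subcmnd_subsys_WORD	word64

int main(void)
{
	/* FCOE subsystem (0x0C), READ_FCF opcode (0x08) packed into word64 */
	struct emb0_tail t = { .word64 = (0x0C << 8) | 0x08 };

	printf("subsys 0x%02x opcode 0x%02x\n",
	       (unsigned)GET_FIELD(subcmnd_subsys, &t),
	       (unsigned)GET_FIELD(subcmnd_opcode, &t));
	return 0;
}
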
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f0b332f4eedb..fc20c247f36b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
55void lpfc_supported_pages(struct lpfcMboxq *); 55void lpfc_supported_pages(struct lpfcMboxq *);
56void lpfc_pc_sli4_params(struct lpfcMboxq *); 56void lpfc_pc_sli4_params(struct lpfcMboxq *);
57int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *); 57int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
58int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
59 uint16_t, uint16_t, bool);
58int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *); 60int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
59struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 61struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
60void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); 62void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
@@ -171,6 +173,7 @@ void lpfc_delayed_disc_tmo(unsigned long);
171void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); 173void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
172 174
173int lpfc_config_port_prep(struct lpfc_hba *); 175int lpfc_config_port_prep(struct lpfc_hba *);
176void lpfc_update_vport_wwn(struct lpfc_vport *vport);
174int lpfc_config_port_post(struct lpfc_hba *); 177int lpfc_config_port_post(struct lpfc_hba *);
175int lpfc_hba_down_prep(struct lpfc_hba *); 178int lpfc_hba_down_prep(struct lpfc_hba *);
176int lpfc_hba_down_post(struct lpfc_hba *); 179int lpfc_hba_down_post(struct lpfc_hba *);
@@ -365,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
365 uint32_t, uint32_t); 368 uint32_t, uint32_t);
366extern struct lpfc_hbq_init *lpfc_hbq_defs[]; 369extern struct lpfc_hbq_init *lpfc_hbq_defs[];
367 370
371/* SLI4 if_type 2 externs. */
372int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
373int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
374
368/* externs BlockGuard */ 375/* externs BlockGuard */
369extern char *_dump_buf_data; 376extern char *_dump_buf_data;
370extern unsigned long _dump_buf_data_order; 377extern unsigned long _dump_buf_data_order;
@@ -429,3 +436,6 @@ void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
429void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); 436void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
430struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, 437struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
431 uint32_t); 438 uint32_t);
439int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
440/* functions to support SR-IOV */
441int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d9edfd90d7ff..779b88e1469d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
352 icmd->ulpLe = 1; 352 icmd->ulpLe = 1;
353 icmd->ulpClass = CLASS3; 353 icmd->ulpClass = CLASS3;
354 icmd->ulpContext = ndlp->nlp_rpi; 354 icmd->ulpContext = ndlp->nlp_rpi;
355 if (phba->sli_rev == LPFC_SLI_REV4)
356 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
355 357
356 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 358 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
357 /* For GEN_REQUEST64_CR, use the RPI */ 359 /* For GEN_REQUEST64_CR, use the RPI */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c93fca058603..ffe82d169b40 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1665,7 +1665,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1665 /* Get fast-path complete queue information */ 1665 /* Get fast-path complete queue information */
1666 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1666 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1667 "Fast-path FCP CQ information:\n"); 1667 "Fast-path FCP CQ information:\n");
1668 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { 1668 fcp_qidx = 0;
1669 do {
1669 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1670 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1670 "Associated EQID[%02d]:\n", 1671 "Associated EQID[%02d]:\n",
1671 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); 1672 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
@@ -1678,7 +1679,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1678 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size, 1679 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
1679 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, 1680 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
1680 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); 1681 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
1681 } 1682 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
1682 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 1683 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
1683 1684
1684 /* Get mailbox queue information */ 1685 /* Get mailbox queue information */
@@ -2012,7 +2013,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2012 goto pass_check; 2013 goto pass_check;
2013 } 2014 }
2014 /* FCP complete queue */ 2015 /* FCP complete queue */
2015 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) { 2016 qidx = 0;
2017 do {
2016 if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) { 2018 if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
2017 /* Sanity check */ 2019 /* Sanity check */
2018 rc = lpfc_idiag_que_param_check( 2020 rc = lpfc_idiag_que_param_check(
@@ -2024,7 +2026,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2024 phba->sli4_hba.fcp_cq[qidx]; 2026 phba->sli4_hba.fcp_cq[qidx];
2025 goto pass_check; 2027 goto pass_check;
2026 } 2028 }
2027 } 2029 } while (++qidx < phba->cfg_fcp_eq_count);
2028 goto error_out; 2030 goto error_out;
2029 break; 2031 break;
2030 case LPFC_IDIAG_MQ: 2032 case LPFC_IDIAG_MQ:
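
The debugfs changes above replace two for loops over cfg_fcp_eq_count with do/while loops, which guarantees the body runs for the first FCP CQ even when cfg_fcp_eq_count is 0 (presumably because at least one FCP CQ always exists on these ports; that rationale is an inference, not stated in the patch). The behavioural difference between the two loop shapes is easy to demonstrate with a small sketch:

#include <stdio.h>

/* Compare iteration counts of the two loop shapes when count == 0. */
static int visits_for(unsigned int count)
{
	unsigned int i;
	int visits = 0;

	for (i = 0; i < count; i++)
		visits++;
	return visits;		/* 0 when count == 0 */
}

static int visits_do_while(unsigned int count)
{
	unsigned int i = 0;
	int visits = 0;

	do {
		visits++;
	} while (++i < count);
	return visits;		/* 1 even when count == 0 */
}

int main(void)
{
	printf("for: %d, do/while: %d\n", visits_for(0), visits_do_while(0));
	return 0;
}
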
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e2c452467c8b..32a084534f3e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
250 icmd->un.elsreq64.myID = vport->fc_myDID; 250 icmd->un.elsreq64.myID = vport->fc_myDID;
251 251
252 /* For ELS_REQUEST64_CR, use the VPI by default */ 252 /* For ELS_REQUEST64_CR, use the VPI by default */
253 icmd->ulpContext = vport->vpi + phba->vpi_base; 253 icmd->ulpContext = phba->vpi_ids[vport->vpi];
254 icmd->ulpCt_h = 0; 254 icmd->ulpCt_h = 0;
255 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 255 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
256 if (elscmd == ELS_CMD_ECHO) 256 if (elscmd == ELS_CMD_ECHO)
@@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
454 rc = -ENOMEM; 454 rc = -ENOMEM;
455 goto fail_free_dmabuf; 455 goto fail_free_dmabuf;
456 } 456 }
457
457 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 458 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
458 if (!mboxq) { 459 if (!mboxq) {
459 rc = -ENOMEM; 460 rc = -ENOMEM;
@@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6585{ 6586{
6586 struct lpfc_vport *vport; 6587 struct lpfc_vport *vport;
6587 unsigned long flags; 6588 unsigned long flags;
6589 int i;
6590
6591 /* The physical ports are always vpi 0 - translate is unnecessary. */
6592 if (vpi > 0) {
6593 /*
6594 * Translate the physical vpi to the logical vpi. The
6595 * vport stores the logical vpi.
6596 */
6597 for (i = 0; i < phba->max_vpi; i++) {
6598 if (vpi == phba->vpi_ids[i])
6599 break;
6600 }
6601
6602 if (i >= phba->max_vpi) {
6603 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
6604 "2936 Could not find Vport mapped "
6605 "to vpi %d\n", vpi);
6606 return NULL;
6607 }
6608 }
6588 6609
6589 spin_lock_irqsave(&phba->hbalock, flags); 6610 spin_lock_irqsave(&phba->hbalock, flags);
6590 list_for_each_entry(vport, &phba->port_list, listentry) { 6611 list_for_each_entry(vport, &phba->port_list, listentry) {
@@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6641 vport = phba->pport; 6662 vport = phba->pport;
6642 else 6663 else
6643 vport = lpfc_find_vport_by_vpid(phba, 6664 vport = lpfc_find_vport_by_vpid(phba,
6644 icmd->unsli3.rcvsli3.vpi - phba->vpi_base); 6665 icmd->unsli3.rcvsli3.vpi);
6645 } 6666 }
6667
6646 /* If there are no BDEs associated 6668 /* If there are no BDEs associated
6647 * with this IOCB, there is nothing to do. 6669 * with this IOCB, there is nothing to do.
6648 */ 6670 */
@@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7222 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1; 7244 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
7223 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ; 7245 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
7224 /* Set the ulpContext to the vpi */ 7246 /* Set the ulpContext to the vpi */
7225 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base; 7247 elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
7226 } else { 7248 } else {
7227 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ 7249 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
7228 icmd->ulpCt_h = 1; 7250 icmd->ulpCt_h = 1;
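
The ELS changes replace the old "vport->vpi + phba->vpi_base" arithmetic with a lookup through phba->vpi_ids[], and lpfc_find_vport_by_vpid() therefore has to invert that mapping with a linear scan before it can match a vport. A rough standalone sketch of both directions is shown below; the vpi_ids table and its contents are made up for illustration, and only the lookup pattern follows the patch.

#include <stdint.h>
#include <stdio.h>

#define MAX_VPI	8	/* arbitrary size for this sketch */

/* Hypothetical logical-to-physical VPI table, indexed by logical vpi. */
static const uint16_t vpi_ids[MAX_VPI] = { 0, 17, 18, 21, 22, 25, 26, 29 };

/* Forward mapping: logical vpi -> physical vpi programmed into the IOCB. */
static uint16_t vpi_to_phys(uint16_t logical)
{
	return vpi_ids[logical];
}

/* Reverse mapping: physical vpi -> logical vpi, or -1 if no entry matches.
 * This is the linear scan lpfc_find_vport_by_vpid() now performs. */
static int vpi_to_logical(uint16_t phys)
{
	int i;

	for (i = 0; i < MAX_VPI; i++)
		if (vpi_ids[i] == phys)
			return i;
	return -1;
}

int main(void)
{
	printf("logical 3 -> physical %u, physical 21 -> logical %d\n",
	       (unsigned)vpi_to_phys(3), vpi_to_logical(21));
	return 0;
}
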
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7a35df5e2038..18d0dbfda2bc 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
881 /* Clean up any firmware default rpi's */ 881 /* Clean up any firmware default rpi's */
882 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 882 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
883 if (mb) { 883 if (mb) {
884 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb); 884 lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
885 mb->vport = vport; 885 mb->vport = vport;
886 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 886 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
887 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) 887 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
@@ -2690,16 +2690,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2690 2690
2691 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, 2691 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
2692 sizeof (struct serv_parm)); 2692 sizeof (struct serv_parm));
2693 if (phba->cfg_soft_wwnn) 2693 lpfc_update_vport_wwn(vport);
2694 u64_to_wwn(phba->cfg_soft_wwnn,
2695 vport->fc_sparam.nodeName.u.wwn);
2696 if (phba->cfg_soft_wwpn)
2697 u64_to_wwn(phba->cfg_soft_wwpn,
2698 vport->fc_sparam.portName.u.wwn);
2699 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
2700 sizeof(vport->fc_nodename));
2701 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
2702 sizeof(vport->fc_portname));
2703 if (vport->port_type == LPFC_PHYSICAL_PORT) { 2694 if (vport->port_type == LPFC_PHYSICAL_PORT) {
2704 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); 2695 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
2705 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); 2696 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3430,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3430 return; 3421 return;
3431 } 3422 }
3432 3423
3433 ndlp->nlp_rpi = mb->un.varWords[0]; 3424 if (phba->sli_rev < LPFC_SLI_REV4)
3425 ndlp->nlp_rpi = mb->un.varWords[0];
3434 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 3426 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3435 ndlp->nlp_type |= NLP_FABRIC; 3427 ndlp->nlp_type |= NLP_FABRIC;
3436 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3428 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3504,7 +3496,8 @@ out:
3504 return; 3496 return;
3505 } 3497 }
3506 3498
3507 ndlp->nlp_rpi = mb->un.varWords[0]; 3499 if (phba->sli_rev < LPFC_SLI_REV4)
3500 ndlp->nlp_rpi = mb->un.varWords[0];
3508 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 3501 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3509 ndlp->nlp_type |= NLP_FABRIC; 3502 ndlp->nlp_type |= NLP_FABRIC;
3510 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3503 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3591,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3591 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 3584 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
3592 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 3585 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
3593 3586
3594
3595 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 3587 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
3596 fc_remote_port_rolechg(rport, rport_ids.roles); 3588 fc_remote_port_rolechg(rport, rport_ids.roles);
3597 3589
@@ -4106,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4106 struct lpfc_hba *phba = vport->phba; 4098 struct lpfc_hba *phba = vport->phba;
4107 LPFC_MBOXQ_t *mbox; 4099 LPFC_MBOXQ_t *mbox;
4108 int rc; 4100 int rc;
4101 uint16_t rpi;
4109 4102
4110 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 4103 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4111 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4104 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4112 if (mbox) { 4105 if (mbox) {
4113 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 4106 /* SLI4 ports require the physical rpi value. */
4107 rpi = ndlp->nlp_rpi;
4108 if (phba->sli_rev == LPFC_SLI_REV4)
4109 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4110 lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
4114 mbox->vport = vport; 4111 mbox->vport = vport;
4115 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4112 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4116 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4113 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4179,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
4179 4176
4180 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4177 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4181 if (mbox) { 4178 if (mbox) {
4182 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); 4179 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
4180 mbox);
4183 mbox->vport = vport; 4181 mbox->vport = vport;
4184 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4182 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4185 mbox->context1 = NULL; 4183 mbox->context1 = NULL;
@@ -4203,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
4203 4201
4204 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4202 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4205 if (mbox) { 4203 if (mbox) {
4206 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox); 4204 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
4205 mbox);
4207 mbox->vport = vport; 4206 mbox->vport = vport;
4208 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4207 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4209 mbox->context1 = NULL; 4208 mbox->context1 = NULL;
@@ -4653,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
4653 if (num_sent) 4652 if (num_sent)
4654 return; 4653 return;
4655 4654
4656 /* 4655 /* Register the VPI for SLI3, NON-NPIV only. */
4657 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
4658 * continue discovery.
4659 */
4660 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4656 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4661 !(vport->fc_flag & FC_PT2PT) && 4657 !(vport->fc_flag & FC_PT2PT) &&
4662 !(vport->fc_flag & FC_RSCN_MODE) && 4658 !(vport->fc_flag & FC_RSCN_MODE) &&
@@ -4943,7 +4939,7 @@ restart_disc:
4943 if (phba->sli_rev < LPFC_SLI_REV4) { 4939 if (phba->sli_rev < LPFC_SLI_REV4) {
4944 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 4940 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
4945 lpfc_issue_reg_vpi(phba, vport); 4941 lpfc_issue_reg_vpi(phba, vport);
4946 else { /* NPIV Not enabled */ 4942 else {
4947 lpfc_issue_clear_la(phba, vport); 4943 lpfc_issue_clear_la(phba, vport);
4948 vport->port_state = LPFC_VPORT_READY; 4944 vport->port_state = LPFC_VPORT_READY;
4949 } 4945 }
@@ -5069,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5069 pmb->context1 = NULL; 5065 pmb->context1 = NULL;
5070 pmb->context2 = NULL; 5066 pmb->context2 = NULL;
5071 5067
5072 ndlp->nlp_rpi = mb->un.varWords[0]; 5068 if (phba->sli_rev < LPFC_SLI_REV4)
5069 ndlp->nlp_rpi = mb->un.varWords[0];
5073 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 5070 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
5074 ndlp->nlp_type |= NLP_FABRIC; 5071 ndlp->nlp_type |= NLP_FABRIC;
5075 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 5072 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -5354,6 +5351,17 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
5354 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 5351 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5355 shost = lpfc_shost_from_vport(vports[i]); 5352 shost = lpfc_shost_from_vport(vports[i]);
5356 spin_lock_irq(shost->host_lock); 5353 spin_lock_irq(shost->host_lock);
5354 /*
5355 * If the CVL_RCVD bit is not set then we have sent the
5356 * flogi.
5357 * If dev_loss fires while we are waiting we do not want to
5358 * unreg the fcf.
5359 */
5360 if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
5361 spin_unlock_irq(shost->host_lock);
5362 ret = 1;
5363 goto out;
5364 }
5357 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 5365 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5358 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport && 5366 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
5359 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 5367 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 86b6f7e6686a..9059524cf225 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,8 @@
64#define SLI3_IOCB_CMD_SIZE 128 64#define SLI3_IOCB_CMD_SIZE 128
65#define SLI3_IOCB_RSP_SIZE 64 65#define SLI3_IOCB_RSP_SIZE 64
66 66
67#define LPFC_UNREG_ALL_RPIS_VPORT 0xffff
68#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff
67 69
68/* vendor ID used in SCSI netlink calls */ 70/* vendor ID used in SCSI netlink calls */
69#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX) 71#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
@@ -903,6 +905,8 @@ struct RRQ { /* Structure is in Big Endian format */
903#define rrq_rxid_WORD rrq_exchg 905#define rrq_rxid_WORD rrq_exchg
904}; 906};
905 907
908#define LPFC_MAX_VFN_PER_PFN 255 /* Maximum VFs allowed per ARI */
909#define LPFC_DEF_VFN_PER_PFN 0 /* Default VFs due to platform limitation */
906 910
907struct RTV_RSP { /* Structure is in Big Endian format */ 911struct RTV_RSP { /* Structure is in Big Endian format */
908 uint32_t ratov; 912 uint32_t ratov;
@@ -1199,7 +1203,9 @@ typedef struct {
1199#define PCI_DEVICE_ID_BALIUS 0xe131 1203#define PCI_DEVICE_ID_BALIUS 0xe131
1200#define PCI_DEVICE_ID_PROTEUS_PF 0xe180 1204#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
1201#define PCI_DEVICE_ID_LANCER_FC 0xe200 1205#define PCI_DEVICE_ID_LANCER_FC 0xe200
1206#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208
1202#define PCI_DEVICE_ID_LANCER_FCOE 0xe260 1207#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
1208#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
1203#define PCI_DEVICE_ID_SAT_SMB 0xf011 1209#define PCI_DEVICE_ID_SAT_SMB 0xf011
1204#define PCI_DEVICE_ID_SAT_MID 0xf015 1210#define PCI_DEVICE_ID_SAT_MID 0xf015
1205#define PCI_DEVICE_ID_RFLY 0xf095 1211#define PCI_DEVICE_ID_RFLY 0xf095
@@ -3021,7 +3027,7 @@ typedef struct {
3021#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t)) 3027#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
3022#define MAILBOX_HBA_EXT_OFFSET 0x100 3028#define MAILBOX_HBA_EXT_OFFSET 0x100
3023/* max mbox xmit size is a page size for sysfs IO operations */ 3029/* max mbox xmit size is a page size for sysfs IO operations */
3024#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE 3030#define MAILBOX_SYSFS_MAX 4096
3025 3031
3026typedef union { 3032typedef union {
3027 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/ 3033 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4dff668ebdad..11e26a26b5d1 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,6 +170,25 @@ struct lpfc_sli_intf {
170#define LPFC_PCI_FUNC3 3 170#define LPFC_PCI_FUNC3 3
171#define LPFC_PCI_FUNC4 4 171#define LPFC_PCI_FUNC4 4
172 172
173/* SLI4 interface type-2 control register offsets */
174#define LPFC_CTL_PORT_SEM_OFFSET 0x400
175#define LPFC_CTL_PORT_STA_OFFSET 0x404
176#define LPFC_CTL_PORT_CTL_OFFSET 0x408
177#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
178#define LPFC_CTL_PORT_ER2_OFFSET 0x410
179#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
180
181/* Some SLI4 interface type-2 PDEV_CTL register bits */
182#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
183#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
184#define LPFC_CTL_PDEV_CTL_DD 0x00000004
185#define LPFC_CTL_PDEV_CTL_LC 0x00000008
186#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
187#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
188#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
189
190#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
191
173/* Active interrupt test count */ 192/* Active interrupt test count */
174#define LPFC_ACT_INTR_CNT 4 193#define LPFC_ACT_INTR_CNT 4
175 194
@@ -210,9 +229,26 @@ struct ulp_bde64 {
210 229
211struct lpfc_sli4_flags { 230struct lpfc_sli4_flags {
212 uint32_t word0; 231 uint32_t word0;
213#define lpfc_fip_flag_SHIFT 0 232#define lpfc_idx_rsrc_rdy_SHIFT 0
214#define lpfc_fip_flag_MASK 0x00000001 233#define lpfc_idx_rsrc_rdy_MASK 0x00000001
215#define lpfc_fip_flag_WORD word0 234#define lpfc_idx_rsrc_rdy_WORD word0
235#define LPFC_IDX_RSRC_RDY 1
236#define lpfc_xri_rsrc_rdy_SHIFT 1
237#define lpfc_xri_rsrc_rdy_MASK 0x00000001
238#define lpfc_xri_rsrc_rdy_WORD word0
239#define LPFC_XRI_RSRC_RDY 1
240#define lpfc_rpi_rsrc_rdy_SHIFT 2
241#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
242#define lpfc_rpi_rsrc_rdy_WORD word0
243#define LPFC_RPI_RSRC_RDY 1
244#define lpfc_vpi_rsrc_rdy_SHIFT 3
245#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
246#define lpfc_vpi_rsrc_rdy_WORD word0
247#define LPFC_VPI_RSRC_RDY 1
248#define lpfc_vfi_rsrc_rdy_SHIFT 4
249#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
250#define lpfc_vfi_rsrc_rdy_WORD word0
251#define LPFC_VFI_RSRC_RDY 1
216}; 252};
217 253
218struct sli4_bls_rsp { 254struct sli4_bls_rsp {
@@ -739,6 +775,12 @@ union lpfc_sli4_cfg_shdr {
739#define lpfc_mbox_hdr_version_SHIFT 0 775#define lpfc_mbox_hdr_version_SHIFT 0
740#define lpfc_mbox_hdr_version_MASK 0x000000FF 776#define lpfc_mbox_hdr_version_MASK 0x000000FF
741#define lpfc_mbox_hdr_version_WORD word9 777#define lpfc_mbox_hdr_version_WORD word9
778#define lpfc_mbox_hdr_pf_num_SHIFT 16
779#define lpfc_mbox_hdr_pf_num_MASK 0x000000FF
780#define lpfc_mbox_hdr_pf_num_WORD word9
781#define lpfc_mbox_hdr_vh_num_SHIFT 24
782#define lpfc_mbox_hdr_vh_num_MASK 0x000000FF
783#define lpfc_mbox_hdr_vh_num_WORD word9
742#define LPFC_Q_CREATE_VERSION_2 2 784#define LPFC_Q_CREATE_VERSION_2 2
743#define LPFC_Q_CREATE_VERSION_1 1 785#define LPFC_Q_CREATE_VERSION_1 1
744#define LPFC_Q_CREATE_VERSION_0 0 786#define LPFC_Q_CREATE_VERSION_0 0
@@ -766,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
766 } response; 808 } response;
767}; 809};
768 810
769/* Mailbox structures */ 811/* Mailbox Header structures.
812 * struct mbox_header is defined for first generation SLI4_CFG mailbox
813 * calls deployed for BE-based ports.
814 *
815 * struct sli4_mbox_header is defined for second generation SLI4
816 * ports that don't deploy the SLI4_CFG mechanism.
817 */
770struct mbox_header { 818struct mbox_header {
771 struct lpfc_sli4_cfg_mhdr cfg_mhdr; 819 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
772 union lpfc_sli4_cfg_shdr cfg_shdr; 820 union lpfc_sli4_cfg_shdr cfg_shdr;
773}; 821};
774 822
823#define LPFC_EXTENT_LOCAL 0
824#define LPFC_TIMEOUT_DEFAULT 0
825#define LPFC_EXTENT_VERSION_DEFAULT 0
826
775/* Subsystem Definitions */ 827/* Subsystem Definitions */
776#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 828#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
777#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC 829#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
@@ -794,6 +846,13 @@ struct mbox_header {
794#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A 846#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
795#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D 847#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
796#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A 848#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
849#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
850#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
851#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
852#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
853#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
854#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
855#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
797#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5 856#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
798 857
799/* FCoE Opcodes */ 858/* FCoE Opcodes */
@@ -808,6 +867,8 @@ struct mbox_header {
808#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A 867#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
809#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B 868#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
810#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10 869#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
870#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
871#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
811 872
812/* Mailbox command structures */ 873/* Mailbox command structures */
813struct eq_context { 874struct eq_context {
@@ -1210,6 +1271,187 @@ struct lpfc_mbx_mq_destroy {
1210 } u; 1271 } u;
1211}; 1272};
1212 1273
1274/* Start Gen 2 SLI4 Mailbox definitions: */
1275
1276/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
1277#define LPFC_RSC_TYPE_FCOE_VFI 0x20
1278#define LPFC_RSC_TYPE_FCOE_VPI 0x21
1279#define LPFC_RSC_TYPE_FCOE_RPI 0x22
1280#define LPFC_RSC_TYPE_FCOE_XRI 0x23
1281
1282struct lpfc_mbx_get_rsrc_extent_info {
1283 struct mbox_header header;
1284 union {
1285 struct {
1286 uint32_t word4;
1287#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0
1288#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF
1289#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4
1290 } req;
1291 struct {
1292 uint32_t word4;
1293#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0
1294#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF
1295#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4
1296#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16
1297#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF
1298#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4
1299 } rsp;
1300 } u;
1301};
1302
1303struct lpfc_id_range {
1304 uint32_t word5;
1305#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
1306#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF
1307#define lpfc_mbx_rsrc_id_word4_0_WORD word5
1308#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
1309#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF
1310#define lpfc_mbx_rsrc_id_word4_1_WORD word5
1311};
1312
1313struct lpfc_mbx_set_link_diag_state {
1314 struct mbox_header header;
1315 union {
1316 struct {
1317 uint32_t word0;
1318#define lpfc_mbx_set_diag_state_diag_SHIFT 0
1319#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
1320#define lpfc_mbx_set_diag_state_diag_WORD word0
1321#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
1322#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
1323#define lpfc_mbx_set_diag_state_link_num_WORD word0
1324#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
1325#define lpfc_mbx_set_diag_state_link_type_MASK 0x00000003
1326#define lpfc_mbx_set_diag_state_link_type_WORD word0
1327 } req;
1328 struct {
1329 uint32_t word0;
1330 } rsp;
1331 } u;
1332};
1333
1334struct lpfc_mbx_set_link_diag_loopback {
1335 struct mbox_header header;
1336 union {
1337 struct {
1338 uint32_t word0;
1339#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
1340#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
1341#define lpfc_mbx_set_diag_lpbk_type_WORD word0
1342#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
1343#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
1344#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
1345#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
1346#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
1347#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
1348#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
1349#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
1350#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
1351 } req;
1352 struct {
1353 uint32_t word0;
1354 } rsp;
1355 } u;
1356};
1357
1358struct lpfc_mbx_run_link_diag_test {
1359 struct mbox_header header;
1360 union {
1361 struct {
1362 uint32_t word0;
1363#define lpfc_mbx_run_diag_test_link_num_SHIFT 16
1364#define lpfc_mbx_run_diag_test_link_num_MASK 0x0000003F
1365#define lpfc_mbx_run_diag_test_link_num_WORD word0
1366#define lpfc_mbx_run_diag_test_link_type_SHIFT 22
1367#define lpfc_mbx_run_diag_test_link_type_MASK 0x00000003
1368#define lpfc_mbx_run_diag_test_link_type_WORD word0
1369 uint32_t word1;
1370#define lpfc_mbx_run_diag_test_test_id_SHIFT 0
1371#define lpfc_mbx_run_diag_test_test_id_MASK 0x0000FFFF
1372#define lpfc_mbx_run_diag_test_test_id_WORD word1
1373#define lpfc_mbx_run_diag_test_loops_SHIFT 16
1374#define lpfc_mbx_run_diag_test_loops_MASK 0x0000FFFF
1375#define lpfc_mbx_run_diag_test_loops_WORD word1
1376 uint32_t word2;
1377#define lpfc_mbx_run_diag_test_test_ver_SHIFT 0
1378#define lpfc_mbx_run_diag_test_test_ver_MASK 0x0000FFFF
1379#define lpfc_mbx_run_diag_test_test_ver_WORD word2
1380#define lpfc_mbx_run_diag_test_err_act_SHIFT 16
1381#define lpfc_mbx_run_diag_test_err_act_MASK 0x000000FF
1382#define lpfc_mbx_run_diag_test_err_act_WORD word2
1383 } req;
1384 struct {
1385 uint32_t word0;
1386 } rsp;
1387 } u;
1388};
1389
1390/*
1391 * struct lpfc_mbx_alloc_rsrc_extents:
1392 * A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
1393 * 6 words of header + 4 words of shared subcommand header +
1394 * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
1395 *
1396 * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
1397 * for extents payload.
1398 *
1399 * 212/2 (bytes per extent) = 106 extents.
1400 * 106/2 (extents per word) = 53 words.
1401 * lpfc_id_range id is statically sized to 53.
1402 *
1403 * This mailbox definition is used for ALLOC or GET_ALLOCATED
1404 * extent ranges. For ALLOC, the type and cnt are required.
1405 * For GET_ALLOCATED, only the type is required.
1406 */
1407struct lpfc_mbx_alloc_rsrc_extents {
1408 struct mbox_header header;
1409 union {
1410 struct {
1411 uint32_t word4;
1412#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
1413#define lpfc_mbx_alloc_rsrc_extents_type_MASK 0x0000FFFF
1414#define lpfc_mbx_alloc_rsrc_extents_type_WORD word4
1415#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT 16
1416#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK 0x0000FFFF
1417#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD word4
1418 } req;
1419 struct {
1420 uint32_t word4;
1421#define lpfc_mbx_rsrc_cnt_SHIFT 0
1422#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
1423#define lpfc_mbx_rsrc_cnt_WORD word4
1424 struct lpfc_id_range id[53];
1425 } rsp;
1426 } u;
1427};
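A small standalone check of the sizing arithmetic in the comment above (256-byte mailbox, 11 header words, 53-entry id[] array), plus a decode of one response word into its two packed 16-bit halves; interpreting those halves as extent IDs follows the comment and is only sketched here.

#include <stdint.h>
#include <stdio.h>

#define MBOX_BYTES	256
#define HDR_WORDS	(6 + 4 + 1)	/* cfg + shared + opcode header */
#define PAYLOAD_BYTES	(MBOX_BYTES - HDR_WORDS * 4)	/* 212 */
#define EXTENTS		(PAYLOAD_BYTES / 2)		/* 106 */
#define ID_RANGE_WORDS	(EXTENTS / 2)			/* 53  */

_Static_assert(PAYLOAD_BYTES == 212, "embedded payload size");
_Static_assert(ID_RANGE_WORDS == 53, "id[] array length");

int main(void)
{
	uint32_t word = 0x00340012;	/* example response word */

	/* Two 16-bit values per word: low half first, high half second. */
	uint16_t ext0 = word & 0xffff;
	uint16_t ext1 = (word >> 16) & 0xffff;

	printf("id[] holds %d words; ext0=0x%x ext1=0x%x\n",
	       ID_RANGE_WORDS, ext0, ext1);
	return 0;
}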
1428
1429/*
1430 * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in this
1431 * structure shares the same SHIFT/MASK/WORD defines provided in the
1432 * mbx_alloc_rsrc_extents and mbx_get_alloc_rsrc_extents, word4, provided in
1433 * the structures defined above. This non-embedded structure provides for the
1434 * maximum number of extents supported by the port.
1435 */
1436struct lpfc_mbx_nembed_rsrc_extent {
1437 union lpfc_sli4_cfg_shdr cfg_shdr;
1438 uint32_t word4;
1439 struct lpfc_id_range id;
1440};
1441
1442struct lpfc_mbx_dealloc_rsrc_extents {
1443 struct mbox_header header;
1444 struct {
1445 uint32_t word4;
1446#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0
1447#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF
1448#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4
1449 } req;
1450
1451};
1452
1453/* Start SLI4 FCoE specific mbox structures. */
1454
1213struct lpfc_mbx_post_hdr_tmpl { 1455struct lpfc_mbx_post_hdr_tmpl {
1214 struct mbox_header header; 1456 struct mbox_header header;
1215 uint32_t word10; 1457 uint32_t word10;
@@ -1229,7 +1471,7 @@ struct sli4_sge { /* SLI-4 */
1229 1471
1230 uint32_t word2; 1472 uint32_t word2;
1231#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/ 1473#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
1232#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF 1474#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF
1233#define lpfc_sli4_sge_offset_WORD word2 1475#define lpfc_sli4_sge_offset_WORD word2
1234#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets 1476#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
1235 this flag !! */ 1477 this flag !! */
@@ -1773,61 +2015,31 @@ struct lpfc_mbx_read_rev {
1773 2015
1774struct lpfc_mbx_read_config { 2016struct lpfc_mbx_read_config {
1775 uint32_t word1; 2017 uint32_t word1;
1776#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0 2018#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
1777#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF 2019#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
1778#define lpfc_mbx_rd_conf_max_bbc_WORD word1 2020#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
1779#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
1780#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
1781#define lpfc_mbx_rd_conf_init_bbc_WORD word1
1782 uint32_t word2; 2021 uint32_t word2;
1783#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
1784#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
1785#define lpfc_mbx_rd_conf_nport_did_WORD word2
1786#define lpfc_mbx_rd_conf_topology_SHIFT 24 2022#define lpfc_mbx_rd_conf_topology_SHIFT 24
1787#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF 2023#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
1788#define lpfc_mbx_rd_conf_topology_WORD word2 2024#define lpfc_mbx_rd_conf_topology_WORD word2
1789 uint32_t word3; 2025 uint32_t rsvd_3;
1790#define lpfc_mbx_rd_conf_ao_SHIFT 0
1791#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
1792#define lpfc_mbx_rd_conf_ao_WORD word3
1793#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
1794#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
1795#define lpfc_mbx_rd_conf_bb_scn_WORD word3
1796#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
1797#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
1798#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
1799#define lpfc_mbx_rd_conf_mc_SHIFT 29
1800#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
1801#define lpfc_mbx_rd_conf_mc_WORD word3
1802 uint32_t word4; 2026 uint32_t word4;
1803#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0 2027#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
1804#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF 2028#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
1805#define lpfc_mbx_rd_conf_e_d_tov_WORD word4 2029#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
1806 uint32_t word5; 2030 uint32_t rsvd_5;
1807#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
1808#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
1809#define lpfc_mbx_rd_conf_lp_tov_WORD word5
1810 uint32_t word6; 2031 uint32_t word6;
1811#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 2032#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
1812#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF 2033#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
1813#define lpfc_mbx_rd_conf_r_a_tov_WORD word6 2034#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
1814 uint32_t word7; 2035 uint32_t rsvd_7;
1815#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0 2036 uint32_t rsvd_8;
1816#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
1817#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
1818 uint32_t word8;
1819#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
1820#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
1821#define lpfc_mbx_rd_conf_al_tov_WORD word8
1822 uint32_t word9; 2037 uint32_t word9;
1823#define lpfc_mbx_rd_conf_lmt_SHIFT 0 2038#define lpfc_mbx_rd_conf_lmt_SHIFT 0
1824#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF 2039#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
1825#define lpfc_mbx_rd_conf_lmt_WORD word9 2040#define lpfc_mbx_rd_conf_lmt_WORD word9
1826 uint32_t word10; 2041 uint32_t rsvd_10;
1827#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0 2042 uint32_t rsvd_11;
1828#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
1829#define lpfc_mbx_rd_conf_max_alpa_WORD word10
1830 uint32_t word11_rsvd;
1831 uint32_t word12; 2043 uint32_t word12;
1832#define lpfc_mbx_rd_conf_xri_base_SHIFT 0 2044#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
1833#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF 2045#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
@@ -1857,9 +2069,6 @@ struct lpfc_mbx_read_config {
1857#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF 2069#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
1858#define lpfc_mbx_rd_conf_vfi_count_WORD word15 2070#define lpfc_mbx_rd_conf_vfi_count_WORD word15
1859 uint32_t word16; 2071 uint32_t word16;
1860#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
1861#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
1862#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
1863#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16 2072#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
1864#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF 2073#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
1865#define lpfc_mbx_rd_conf_fcfi_count_WORD word16 2074#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
@@ -2169,6 +2378,12 @@ struct lpfc_sli4_parameters {
2169#define cfg_fcoe_SHIFT 0 2378#define cfg_fcoe_SHIFT 0
2170#define cfg_fcoe_MASK 0x00000001 2379#define cfg_fcoe_MASK 0x00000001
2171#define cfg_fcoe_WORD word12 2380#define cfg_fcoe_WORD word12
2381#define cfg_ext_SHIFT 1
2382#define cfg_ext_MASK 0x00000001
2383#define cfg_ext_WORD word12
2384#define cfg_hdrr_SHIFT 2
2385#define cfg_hdrr_MASK 0x00000001
2386#define cfg_hdrr_WORD word12
2172#define cfg_phwq_SHIFT 15 2387#define cfg_phwq_SHIFT 15
2173#define cfg_phwq_MASK 0x00000001 2388#define cfg_phwq_MASK 0x00000001
2174#define cfg_phwq_WORD word12 2389#define cfg_phwq_WORD word12
@@ -2198,6 +2413,145 @@ struct lpfc_mbx_get_sli4_parameters {
2198 struct lpfc_sli4_parameters sli4_parameters; 2413 struct lpfc_sli4_parameters sli4_parameters;
2199}; 2414};
2200 2415
2416struct lpfc_rscr_desc_generic {
2417#define LPFC_RSRC_DESC_WSIZE 18
2418 uint32_t desc[LPFC_RSRC_DESC_WSIZE];
2419};
2420
2421struct lpfc_rsrc_desc_pcie {
2422 uint32_t word0;
2423#define lpfc_rsrc_desc_pcie_type_SHIFT 0
2424#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
2425#define lpfc_rsrc_desc_pcie_type_WORD word0
2426#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
2427 uint32_t word1;
2428#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
2429#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
2430#define lpfc_rsrc_desc_pcie_pfnum_WORD word1
2431 uint32_t reserved;
2432 uint32_t word3;
2433#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT 0
2434#define lpfc_rsrc_desc_pcie_sriov_sta_MASK 0x000000ff
2435#define lpfc_rsrc_desc_pcie_sriov_sta_WORD word3
2436#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT 8
2437#define lpfc_rsrc_desc_pcie_pf_sta_MASK 0x000000ff
2438#define lpfc_rsrc_desc_pcie_pf_sta_WORD word3
2439#define lpfc_rsrc_desc_pcie_pf_type_SHIFT 16
2440#define lpfc_rsrc_desc_pcie_pf_type_MASK 0x000000ff
2441#define lpfc_rsrc_desc_pcie_pf_type_WORD word3
2442 uint32_t word4;
2443#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT 0
2444#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK 0x0000ffff
2445#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD word4
2446};
2447
2448struct lpfc_rsrc_desc_fcfcoe {
2449 uint32_t word0;
2450#define lpfc_rsrc_desc_fcfcoe_type_SHIFT 0
2451#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
2452#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
2453#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
2454 uint32_t word1;
2455#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
2456#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
2457#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD word1
2458#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT 16
2459#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK 0x000007ff
2460#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD word1
2461 uint32_t word2;
2462#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT 0
2463#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK 0x0000ffff
2464#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD word2
2465#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT 16
2466#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK 0x0000ffff
2467#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD word2
2468 uint32_t word3;
2469#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT 0
2470#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK 0x0000ffff
2471#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD word3
2472#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT 16
2473#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK 0x0000ffff
2474#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD word3
2475 uint32_t word4;
2476#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT 0
2477#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK 0x0000ffff
2478#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD word4
2479#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT 16
2480#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK 0x0000ffff
2481#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD word4
2482 uint32_t word5;
2483#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT 0
2484#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK 0x0000ffff
2485#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD word5
2486#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT 16
2487#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK 0x0000ffff
2488#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD word5
2489 uint32_t word6;
2490 uint32_t word7;
2491 uint32_t word8;
2492 uint32_t word9;
2493 uint32_t word10;
2494 uint32_t word11;
2495 uint32_t word12;
2496 uint32_t word13;
2497#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT 0
2498#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK 0x0000003f
2499#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD word13
2500#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT 6
2501#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK 0x00000003
2502#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD word13
2503#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT 8
2504#define lpfc_rsrc_desc_fcfcoe_lmc_MASK 0x00000001
2505#define lpfc_rsrc_desc_fcfcoe_lmc_WORD word13
2506#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT 9
2507#define lpfc_rsrc_desc_fcfcoe_lld_MASK 0x00000001
2508#define lpfc_rsrc_desc_fcfcoe_lld_WORD word13
2509#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
2510#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
2511#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
2512};
2513
2514struct lpfc_func_cfg {
2515#define LPFC_RSRC_DESC_MAX_NUM 2
2516 uint32_t rsrc_desc_count;
2517 struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
2518};
2519
2520struct lpfc_mbx_get_func_cfg {
2521 struct mbox_header header;
2522#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
2523#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
2524#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
2525 struct lpfc_func_cfg func_cfg;
2526};
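The GET_FUNCTION_CONFIG response carries up to LPFC_RSRC_DESC_MAX_NUM descriptors, and the READ_CONFIG changes later in this patch scan them for the FC/FCoE type to pull the PF and VF numbers. A trimmed standalone sketch of that scan; the structures here are stand-ins for the full definitions above.

#include <stdint.h>
#include <stdio.h>

#define LPFC_RSRC_DESC_MAX_NUM		2
#define LPFC_RSRC_DESC_TYPE_FCFCOE	0x43

struct rsrc_desc {
	uint32_t word0;		/* bits 7:0  = descriptor type */
	uint32_t word1;		/* bits 7:0  = vf#, bits 26:16 = pf# */
};

int main(void)
{
	struct rsrc_desc desc[LPFC_RSRC_DESC_MAX_NUM] = {
		{ .word0 = 0x40, .word1 = 0 },		   /* PCIe descriptor */
		{ .word0 = 0x43, .word1 = (3 << 16) | 1 }, /* FC/FCoE: pf 3, vf 1 */
	};
	int i;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		if ((desc[i].word0 & 0xff) == LPFC_RSRC_DESC_TYPE_FCFCOE) {
			printf("pf=%u vf=%u\n",
			       (unsigned)((desc[i].word1 >> 16) & 0x7ff),
			       (unsigned)(desc[i].word1 & 0xff));
			break;
		}
	}
	return 0;
}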
2527
2528struct lpfc_prof_cfg {
2529#define LPFC_RSRC_DESC_MAX_NUM 2
2530 uint32_t rsrc_desc_count;
2531 struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
2532};
2533
2534struct lpfc_mbx_get_prof_cfg {
2535 struct mbox_header header;
2536#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
2537#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
2538#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
2539 union {
2540 struct {
2541 uint32_t word10;
2542#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT 0
2543#define lpfc_mbx_get_prof_cfg_prof_id_MASK 0x000000ff
2544#define lpfc_mbx_get_prof_cfg_prof_id_WORD word10
2545#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT 8
2546#define lpfc_mbx_get_prof_cfg_prof_tp_MASK 0x00000003
2547#define lpfc_mbx_get_prof_cfg_prof_tp_WORD word10
2548 } request;
2549 struct {
2550 struct lpfc_prof_cfg prof_cfg;
2551 } response;
2552 } u;
2553};
2554
2201/* Mailbox Completion Queue Error Messages */ 2555/* Mailbox Completion Queue Error Messages */
2202#define MB_CQE_STATUS_SUCCESS 0x0 2556#define MB_CQE_STATUS_SUCCESS 0x0
2203#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 2557#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2206,6 +2560,29 @@ struct lpfc_mbx_get_sli4_parameters {
2206#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 2560#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
2207#define MB_CQE_STATUS_DMA_FAILED 0x5 2561#define MB_CQE_STATUS_DMA_FAILED 0x5
2208 2562
2563#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
2564struct lpfc_mbx_wr_object {
2565 struct mbox_header header;
2566 union {
2567 struct {
2568 uint32_t word4;
2569#define lpfc_wr_object_eof_SHIFT 31
2570#define lpfc_wr_object_eof_MASK 0x00000001
2571#define lpfc_wr_object_eof_WORD word4
2572#define lpfc_wr_object_write_length_SHIFT 0
2573#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
2574#define lpfc_wr_object_write_length_WORD word4
2575 uint32_t write_offset;
2576 uint32_t object_name[26];
2577 uint32_t bde_count;
2578 struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
2579 } request;
2580 struct {
2581 uint32_t actual_write_length;
2582 } response;
2583 } u;
2584};
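In the new WRITE_OBJECT request, word4 packs a 24-bit write length with an end-of-file flag in bit 31. A minimal sketch of that packing, assuming the field layout from the defines above; the rest of the request (offset, object name, BDE list) is left out.

#include <stdint.h>
#include <stdio.h>

static uint32_t wr_object_word4(uint32_t write_length, int eof)
{
	uint32_t word4 = write_length & 0x00ffffff;	/* write_length */

	if (eof)
		word4 |= 1u << 31;			/* eof flag     */
	return word4;
}

int main(void)
{
	/* Final 4 KB chunk of an object: 0x80001000. */
	printf("word4=0x%08x\n", (unsigned)wr_object_word4(4096, 1));
	return 0;
}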
2585
2209/* mailbox queue entry structure */ 2586/* mailbox queue entry structure */
2210struct lpfc_mqe { 2587struct lpfc_mqe {
2211 uint32_t word0; 2588 uint32_t word0;
@@ -2241,6 +2618,9 @@ struct lpfc_mqe {
2241 struct lpfc_mbx_cq_destroy cq_destroy; 2618 struct lpfc_mbx_cq_destroy cq_destroy;
2242 struct lpfc_mbx_wq_destroy wq_destroy; 2619 struct lpfc_mbx_wq_destroy wq_destroy;
2243 struct lpfc_mbx_rq_destroy rq_destroy; 2620 struct lpfc_mbx_rq_destroy rq_destroy;
2621 struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
2622 struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
2623 struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
2244 struct lpfc_mbx_post_sgl_pages post_sgl_pages; 2624 struct lpfc_mbx_post_sgl_pages post_sgl_pages;
2245 struct lpfc_mbx_nembed_cmd nembed_cmd; 2625 struct lpfc_mbx_nembed_cmd nembed_cmd;
2246 struct lpfc_mbx_read_rev read_rev; 2626 struct lpfc_mbx_read_rev read_rev;
@@ -2252,7 +2632,13 @@ struct lpfc_mqe {
2252 struct lpfc_mbx_supp_pages supp_pages; 2632 struct lpfc_mbx_supp_pages supp_pages;
2253 struct lpfc_mbx_pc_sli4_params sli4_params; 2633 struct lpfc_mbx_pc_sli4_params sli4_params;
2254 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; 2634 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
2635 struct lpfc_mbx_set_link_diag_state link_diag_state;
2636 struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
2637 struct lpfc_mbx_run_link_diag_test link_diag_test;
2638 struct lpfc_mbx_get_func_cfg get_func_cfg;
2639 struct lpfc_mbx_get_prof_cfg get_prof_cfg;
2255 struct lpfc_mbx_nop nop; 2640 struct lpfc_mbx_nop nop;
2641 struct lpfc_mbx_wr_object wr_object;
2256 } un; 2642 } un;
2257}; 2643};
2258 2644
@@ -2458,7 +2844,7 @@ struct lpfc_bmbx_create {
2458#define SGL_ALIGN_SZ 64 2844#define SGL_ALIGN_SZ 64
2459#define SGL_PAGE_SIZE 4096 2845#define SGL_PAGE_SIZE 4096
2460/* align SGL addr on a size boundary - adjust address up */ 2846/* align SGL addr on a size boundary - adjust address up */
2461#define NO_XRI ((uint16_t)-1) 2847#define NO_XRI 0xffff
2462 2848
2463struct wqe_common { 2849struct wqe_common {
2464 uint32_t word6; 2850 uint32_t word6;
@@ -2798,9 +3184,28 @@ union lpfc_wqe {
2798 struct gen_req64_wqe gen_req; 3184 struct gen_req64_wqe gen_req;
2799}; 3185};
2800 3186
3187#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
3188#define LPFC_FILE_TYPE_GROUP 0xf7
3189#define LPFC_FILE_ID_GROUP 0xa2
3190struct lpfc_grp_hdr {
3191 uint32_t size;
3192 uint32_t magic_number;
3193 uint32_t word2;
3194#define lpfc_grp_hdr_file_type_SHIFT 24
3195#define lpfc_grp_hdr_file_type_MASK 0x000000FF
3196#define lpfc_grp_hdr_file_type_WORD word2
3197#define lpfc_grp_hdr_id_SHIFT 16
3198#define lpfc_grp_hdr_id_MASK 0x000000FF
3199#define lpfc_grp_hdr_id_WORD word2
3200 uint8_t rev_name[128];
3201};
3202
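A standalone sketch of validating a firmware image group header against the new magic number, file type, and id defines; the driver path that actually consumes struct lpfc_grp_hdr is not shown in this hunk, so treat the check as illustrative.

#include <stdint.h>
#include <stdio.h>

#define LPFC_GROUP_OJECT_MAGIC_NUM	0xfeaa0001
#define LPFC_FILE_TYPE_GROUP		0xf7
#define LPFC_FILE_ID_GROUP		0xa2

struct lpfc_grp_hdr {
	uint32_t size;
	uint32_t magic_number;
	uint32_t word2;		/* bits 31:24 file type, bits 23:16 id */
	uint8_t  rev_name[128];
};

static int grp_hdr_ok(const struct lpfc_grp_hdr *h)
{
	return h->magic_number == LPFC_GROUP_OJECT_MAGIC_NUM &&
	       ((h->word2 >> 24) & 0xff) == LPFC_FILE_TYPE_GROUP &&
	       ((h->word2 >> 16) & 0xff) == LPFC_FILE_ID_GROUP;
}

int main(void)
{
	struct lpfc_grp_hdr h = {
		.magic_number = LPFC_GROUP_OJECT_MAGIC_NUM,
		.word2 = (0xf7u << 24) | (0xa2u << 16),
	};

	printf("group header %s\n", grp_hdr_ok(&h) ? "valid" : "invalid");
	return 0;
}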
2801#define FCP_COMMAND 0x0 3203#define FCP_COMMAND 0x0
2802#define FCP_COMMAND_DATA_OUT 0x1 3204#define FCP_COMMAND_DATA_OUT 0x1
2803#define ELS_COMMAND_NON_FIP 0xC 3205#define ELS_COMMAND_NON_FIP 0xC
2804#define ELS_COMMAND_FIP 0xD 3206#define ELS_COMMAND_FIP 0xD
2805#define OTHER_COMMAND 0x8 3207#define OTHER_COMMAND 0x8
2806 3208
3209#define LPFC_FW_DUMP 1
3210#define LPFC_FW_RESET 2
3211#define LPFC_DV_RESET 3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dda036a1af3..148b98ddbb1d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -30,6 +30,7 @@
30#include <linux/ctype.h> 30#include <linux/ctype.h>
31#include <linux/aer.h> 31#include <linux/aer.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/firmware.h>
33 34
34#include <scsi/scsi.h> 35#include <scsi/scsi.h>
35#include <scsi/scsi_device.h> 36#include <scsi/scsi_device.h>
@@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
211 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 212 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
212 if (!lpfc_vpd_data) 213 if (!lpfc_vpd_data)
213 goto out_free_mbox; 214 goto out_free_mbox;
214
215 do { 215 do {
216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -309,6 +309,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
309} 309}
310 310
311/** 311/**
312 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
313 * cfg_soft_wwnn, cfg_soft_wwpn
314 * @vport: pointer to lpfc vport data structure.
315 *
316 *
317 * Return codes
318 * None.
319 **/
320void
321lpfc_update_vport_wwn(struct lpfc_vport *vport)
322{
323 /* If the soft name exists then update it using the service params */
324 if (vport->phba->cfg_soft_wwnn)
325 u64_to_wwn(vport->phba->cfg_soft_wwnn,
326 vport->fc_sparam.nodeName.u.wwn);
327 if (vport->phba->cfg_soft_wwpn)
328 u64_to_wwn(vport->phba->cfg_soft_wwpn,
329 vport->fc_sparam.portName.u.wwn);
330
331 /*
332 * If the name is empty or there exists a soft name
333 * then copy the service params name, otherwise use the fc name
334 */
335 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
336 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
337 sizeof(struct lpfc_name));
338 else
339 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
340 sizeof(struct lpfc_name));
341
342 if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
343 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
344 sizeof(struct lpfc_name));
345 else
346 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
347 sizeof(struct lpfc_name));
348}
349
350/**
312 * lpfc_config_port_post - Perform lpfc initialization after config port 351 * lpfc_config_port_post - Perform lpfc initialization after config port
313 * @phba: pointer to lpfc hba data structure. 352 * @phba: pointer to lpfc hba data structure.
314 * 353 *
@@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
377 lpfc_mbuf_free(phba, mp->virt, mp->phys); 416 lpfc_mbuf_free(phba, mp->virt, mp->phys);
378 kfree(mp); 417 kfree(mp);
379 pmb->context1 = NULL; 418 pmb->context1 = NULL;
380 419 lpfc_update_vport_wwn(vport);
381 if (phba->cfg_soft_wwnn)
382 u64_to_wwn(phba->cfg_soft_wwnn,
383 vport->fc_sparam.nodeName.u.wwn);
384 if (phba->cfg_soft_wwpn)
385 u64_to_wwn(phba->cfg_soft_wwpn,
386 vport->fc_sparam.portName.u.wwn);
387 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
388 sizeof (struct lpfc_name));
389 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
390 sizeof (struct lpfc_name));
391 420
392 /* Update the fc_host data structures with new wwn. */ 421 /* Update the fc_host data structures with new wwn. */
393 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 422 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
@@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
573 /* Clear all pending interrupts */ 602 /* Clear all pending interrupts */
574 writel(0xffffffff, phba->HAregaddr); 603 writel(0xffffffff, phba->HAregaddr);
575 readl(phba->HAregaddr); /* flush */ 604 readl(phba->HAregaddr); /* flush */
576
577 phba->link_state = LPFC_HBA_ERROR; 605 phba->link_state = LPFC_HBA_ERROR;
578 if (rc != MBX_BUSY) 606 if (rc != MBX_BUSY)
579 mempool_free(pmb, phba->mbox_mem_pool); 607 mempool_free(pmb, phba->mbox_mem_pool);
@@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1755 && descp && descp[0] != '\0') 1783 && descp && descp[0] != '\0')
1756 return; 1784 return;
1757 1785
1758 if (phba->lmt & LMT_10Gb) 1786 if (phba->lmt & LMT_16Gb)
1787 max_speed = 16;
1788 else if (phba->lmt & LMT_10Gb)
1759 max_speed = 10; 1789 max_speed = 10;
1760 else if (phba->lmt & LMT_8Gb) 1790 else if (phba->lmt & LMT_8Gb)
1761 max_speed = 8; 1791 max_speed = 8;
@@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1922 "Fibre Channel Adapter"}; 1952 "Fibre Channel Adapter"};
1923 break; 1953 break;
1924 case PCI_DEVICE_ID_LANCER_FC: 1954 case PCI_DEVICE_ID_LANCER_FC:
1925 oneConnect = 1; 1955 case PCI_DEVICE_ID_LANCER_FC_VF:
1926 m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"}; 1956 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
1927 break; 1957 break;
1928 case PCI_DEVICE_ID_LANCER_FCOE: 1958 case PCI_DEVICE_ID_LANCER_FCOE:
1959 case PCI_DEVICE_ID_LANCER_FCOE_VF:
1929 oneConnect = 1; 1960 oneConnect = 1;
1930 m = (typeof(m)){"Undefined", "PCIe", "FCoE"}; 1961 m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
1931 break; 1962 break;
1932 default: 1963 default:
1933 m = (typeof(m)){"Unknown", "", ""}; 1964 m = (typeof(m)){"Unknown", "", ""};
@@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1936 1967
1937 if (mdp && mdp[0] == '\0') 1968 if (mdp && mdp[0] == '\0')
1938 snprintf(mdp, 79,"%s", m.name); 1969 snprintf(mdp, 79,"%s", m.name);
1939 /* oneConnect hba requires special processing, they are all initiators 1970 /*
1971 * oneConnect hba requires special processing, they are all initiators
1940 * and we put the port number on the end 1972 * and we put the port number on the end
1941 */ 1973 */
1942 if (descp && descp[0] == '\0') { 1974 if (descp && descp[0] == '\0') {
@@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2656 kfree(io); 2688 kfree(io);
2657 phba->total_iocbq_bufs--; 2689 phba->total_iocbq_bufs--;
2658 } 2690 }
2691
2659 spin_unlock_irq(&phba->hbalock); 2692 spin_unlock_irq(&phba->hbalock);
2660 return 0; 2693 return 0;
2661} 2694}
@@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3612 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3645 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3613 "2718 Clear Virtual Link Received for VPI 0x%x" 3646 "2718 Clear Virtual Link Received for VPI 0x%x"
3614 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3647 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3648
3615 vport = lpfc_find_vport_by_vpid(phba, 3649 vport = lpfc_find_vport_by_vpid(phba,
3616 acqe_fip->index - phba->vpi_base); 3650 acqe_fip->index - phba->vpi_base);
3617 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3651 ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
3935 pci_try_set_mwi(pdev); 3969 pci_try_set_mwi(pdev);
3936 pci_save_state(pdev); 3970 pci_save_state(pdev);
3937 3971
3972 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
3973 if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
3974 pdev->needs_freset = 1;
3975
3938 return 0; 3976 return 0;
3939 3977
3940out_disable_device: 3978out_disable_device:
@@ -3997,6 +4035,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
3997} 4035}
3998 4036
3999/** 4037/**
4038 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4039 * @phba: pointer to lpfc hba data structure.
4040 * @nr_vfn: number of virtual functions to be enabled.
4041 *
4042 * This function enables the PCI SR-IOV virtual functions to a physical
4043 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4044 * enable the number of virtual functions to the physical function. As
4045 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4046 * API call does not considered as an error condition for most of the device.
4047 **/
4048int
4049lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4050{
4051 struct pci_dev *pdev = phba->pcidev;
4052 int rc;
4053
4054 rc = pci_enable_sriov(pdev, nr_vfn);
4055 if (rc) {
4056 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4057 "2806 Failed to enable sriov on this device "
4058 "with vfn number nr_vf:%d, rc:%d\n",
4059 nr_vfn, rc);
4060 } else
4061 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4062 "2807 Successful enable sriov on this device "
4063 "with vfn number nr_vf:%d\n", nr_vfn);
4064 return rc;
4065}
4066
4067/**
4000 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 4068 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4001 * @phba: pointer to lpfc hba data structure. 4069 * @phba: pointer to lpfc hba data structure.
4002 * 4070 *
@@ -4011,6 +4079,7 @@ static int
4011lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 4079lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4012{ 4080{
4013 struct lpfc_sli *psli; 4081 struct lpfc_sli *psli;
4082 int rc;
4014 4083
4015 /* 4084 /*
4016 * Initialize timers used by driver 4085 * Initialize timers used by driver
@@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4085 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 4154 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4086 return -ENOMEM; 4155 return -ENOMEM;
4087 4156
4157 /*
4158 * Enable sr-iov virtual functions if supported and configured
4159 * through the module parameter.
4160 */
4161 if (phba->cfg_sriov_nr_virtfn > 0) {
4162 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4163 phba->cfg_sriov_nr_virtfn);
4164 if (rc) {
4165 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4166 "2808 Requested number of SR-IOV "
4167 "virtual functions (%d) is not "
4168 "supported\n",
4169 phba->cfg_sriov_nr_virtfn);
4170 phba->cfg_sriov_nr_virtfn = 0;
4171 }
4172 }
4173
4088 return 0; 4174 return 0;
4089} 4175}
4090 4176
@@ -4161,6 +4247,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4161 phba->fcf.redisc_wait.data = (unsigned long)phba; 4247 phba->fcf.redisc_wait.data = (unsigned long)phba;
4162 4248
4163 /* 4249 /*
4250 * Control structure for handling external multi-buffer mailbox
4251 * command pass-through.
4252 */
4253 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4254 sizeof(struct lpfc_mbox_ext_buf_ctx));
4255 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4256
4257 /*
4164 * We need to do a READ_CONFIG mailbox command here before 4258 * We need to do a READ_CONFIG mailbox command here before
4165 * calling lpfc_get_cfgparam. For VFs this will report the 4259 * calling lpfc_get_cfgparam. For VFs this will report the
4166 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 4260 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
@@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4233 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 4327 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4234 4328
4235 /* 4329 /*
4236 * Initialize dirver internal slow-path work queues 4330 * Initialize driver internal slow-path work queues
4237 */ 4331 */
4238 4332
4239 /* Driver internel slow-path CQ Event pool */ 4333 /* Driver internel slow-path CQ Event pool */
@@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4249 /* Receive queue CQ Event work queue list */ 4343 /* Receive queue CQ Event work queue list */
4250 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 4344 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4251 4345
4346 /* Initialize extent block lists. */
4347 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4348 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4349 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4350 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4351
4252 /* Initialize the driver internal SLI layer lists. */ 4352 /* Initialize the driver internal SLI layer lists. */
4253 lpfc_sli_setup(phba); 4353 lpfc_sli_setup(phba);
4254 lpfc_sli_queue_setup(phba); 4354 lpfc_sli_queue_setup(phba);
@@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4323 } 4423 }
4324 /* 4424 /*
4325 * Get sli4 parameters that override parameters from Port capabilities. 4425 * Get sli4 parameters that override parameters from Port capabilities.
4326 * If this call fails it is not a critical error so continue loading. 4426 * If this call fails, it isn't critical unless the SLI4 parameters come
4427 * back in conflict.
4327 */ 4428 */
4328 lpfc_get_sli4_parameters(phba, mboxq); 4429 rc = lpfc_get_sli4_parameters(phba, mboxq);
4430 if (rc) {
4431 if (phba->sli4_hba.extents_in_use &&
4432 phba->sli4_hba.rpi_hdrs_in_use) {
4433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4434 "2999 Unsupported SLI4 Parameters "
4435 "Extents and RPI headers enabled.\n");
4436 goto out_free_bsmbx;
4437 }
4438 }
4329 mempool_free(mboxq, phba->mbox_mem_pool); 4439 mempool_free(mboxq, phba->mbox_mem_pool);
4330 /* Create all the SLI4 queues */ 4440 /* Create all the SLI4 queues */
4331 rc = lpfc_sli4_queue_create(phba); 4441 rc = lpfc_sli4_queue_create(phba);
@@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4350 "1430 Failed to initialize sgl list.\n"); 4460 "1430 Failed to initialize sgl list.\n");
4351 goto out_free_sgl_list; 4461 goto out_free_sgl_list;
4352 } 4462 }
4353
4354 rc = lpfc_sli4_init_rpi_hdrs(phba); 4463 rc = lpfc_sli4_init_rpi_hdrs(phba);
4355 if (rc) { 4464 if (rc) {
4356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4367 "2759 Failed allocate memory for FCF round " 4476 "2759 Failed allocate memory for FCF round "
4368 "robin failover bmask\n"); 4477 "robin failover bmask\n");
4478 rc = -ENOMEM;
4369 goto out_remove_rpi_hdrs; 4479 goto out_remove_rpi_hdrs;
4370 } 4480 }
4371 4481
@@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4376 "2572 Failed allocate memory for fast-path " 4486 "2572 Failed allocate memory for fast-path "
4377 "per-EQ handle array\n"); 4487 "per-EQ handle array\n");
4488 rc = -ENOMEM;
4378 goto out_free_fcf_rr_bmask; 4489 goto out_free_fcf_rr_bmask;
4379 } 4490 }
4380 4491
@@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4385 "2573 Failed allocate memory for msi-x " 4496 "2573 Failed allocate memory for msi-x "
4386 "interrupt vector entries\n"); 4497 "interrupt vector entries\n");
4498 rc = -ENOMEM;
4387 goto out_free_fcp_eq_hdl; 4499 goto out_free_fcp_eq_hdl;
4388 } 4500 }
4389 4501
4502 /*
4503 * Enable sr-iov virtual functions if supported and configured
4504 * through the module parameter.
4505 */
4506 if (phba->cfg_sriov_nr_virtfn > 0) {
4507 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4508 phba->cfg_sriov_nr_virtfn);
4509 if (rc) {
4510 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4511 "3020 Requested number of SR-IOV "
4512 "virtual functions (%d) is not "
4513 "supported\n",
4514 phba->cfg_sriov_nr_virtfn);
4515 phba->cfg_sriov_nr_virtfn = 0;
4516 }
4517 }
4518
4390 return rc; 4519 return rc;
4391 4520
4392out_free_fcp_eq_hdl: 4521out_free_fcp_eq_hdl:
@@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4449 lpfc_sli4_cq_event_release_all(phba); 4578 lpfc_sli4_cq_event_release_all(phba);
4450 lpfc_sli4_cq_event_pool_destroy(phba); 4579 lpfc_sli4_cq_event_pool_destroy(phba);
4451 4580
4581 /* Release resource identifiers. */
4582 lpfc_sli4_dealloc_resource_identifiers(phba);
4583
4452 /* Free the bsmbx region. */ 4584 /* Free the bsmbx region. */
4453 lpfc_destroy_bootstrap_mbox(phba); 4585 lpfc_destroy_bootstrap_mbox(phba);
4454 4586
@@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4649 "Unloading driver.\n", __func__); 4781 "Unloading driver.\n", __func__);
4650 goto out_free_iocbq; 4782 goto out_free_iocbq;
4651 } 4783 }
4784 iocbq_entry->sli4_lxritag = NO_XRI;
4652 iocbq_entry->sli4_xritag = NO_XRI; 4785 iocbq_entry->sli4_xritag = NO_XRI;
4653 4786
4654 spin_lock_irq(&phba->hbalock); 4787 spin_lock_irq(&phba->hbalock);
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4746 4879
4747 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4880 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4748 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4881 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4749 "2400 lpfc_init_sgl_list els %d.\n", 4882 "2400 ELS XRI count %d.\n",
4750 els_xri_cnt); 4883 els_xri_cnt);
4751 /* Initialize and populate the sglq list per host/VF. */ 4884 /* Initialize and populate the sglq list per host/VF. */
4752 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 4885 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4779 phba->sli4_hba.scsi_xri_max = 4912 phba->sli4_hba.scsi_xri_max =
4780 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4913 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4781 phba->sli4_hba.scsi_xri_cnt = 0; 4914 phba->sli4_hba.scsi_xri_cnt = 0;
4782
4783 phba->sli4_hba.lpfc_scsi_psb_array = 4915 phba->sli4_hba.lpfc_scsi_psb_array =
4784 kzalloc((sizeof(struct lpfc_scsi_buf *) * 4916 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4785 phba->sli4_hba.scsi_xri_max), GFP_KERNEL); 4917 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4802 goto out_free_mem; 4934 goto out_free_mem;
4803 } 4935 }
4804 4936
4805 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4806 if (sglq_entry->sli4_xritag == NO_XRI) {
4807 kfree(sglq_entry);
4808 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4809 "Unloading driver.\n", __func__);
4810 goto out_free_mem;
4811 }
4812 sglq_entry->buff_type = GEN_BUFF_TYPE; 4937 sglq_entry->buff_type = GEN_BUFF_TYPE;
4813 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); 4938 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4814 if (sglq_entry->virt == NULL) { 4939 if (sglq_entry->virt == NULL) {
@@ -4857,24 +4982,20 @@ int
4857lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 4982lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4858{ 4983{
4859 int rc = 0; 4984 int rc = 0;
4860 int longs;
4861 uint16_t rpi_count;
4862 struct lpfc_rpi_hdr *rpi_hdr; 4985 struct lpfc_rpi_hdr *rpi_hdr;
4863 4986
4864 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 4987 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4865
4866 /* 4988 /*
4867 * Provision an rpi bitmask range for discovery. The total count 4989 * If the SLI4 port supports extents, posting the rpi header isn't
4868 * is the difference between max and base + 1. 4990 * required. Set the expected maximum count and let the actual value
4991 * get set when extents are fully allocated.
4869 */ 4992 */
4870 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + 4993 if (!phba->sli4_hba.rpi_hdrs_in_use) {
4871 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4994 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
4872 4995 return rc;
4873 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; 4996 }
4874 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), 4997 if (phba->sli4_hba.extents_in_use)
4875 GFP_KERNEL); 4998 return -EIO;
4876 if (!phba->sli4_hba.rpi_bmask)
4877 return -ENOMEM;
4878 4999
4879 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 5000 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4880 if (!rpi_hdr) { 5001 if (!rpi_hdr) {
@@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4908 struct lpfc_rpi_hdr *rpi_hdr; 5029 struct lpfc_rpi_hdr *rpi_hdr;
4909 uint32_t rpi_count; 5030 uint32_t rpi_count;
4910 5031
5032 /*
5033 * If the SLI4 port supports extents, posting the rpi header isn't
5034 * required. Set the expected maximum count and let the actual value
5035 * get set when extents are fully allocated.
5036 */
5037 if (!phba->sli4_hba.rpi_hdrs_in_use)
5038 return NULL;
5039 if (phba->sli4_hba.extents_in_use)
5040 return NULL;
5041
5042 /* The limit on the logical index is just the max_rpi count. */
4911 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 5043 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4912 phba->sli4_hba.max_cfg_param.max_rpi - 1; 5044 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4913 5045
4914 spin_lock_irq(&phba->hbalock); 5046 spin_lock_irq(&phba->hbalock);
4915 curr_rpi_range = phba->sli4_hba.next_rpi; 5047 /*
5048 * Establish the starting RPI in this header block. The starting
5049 * rpi is normalized to a zero base because the physical rpi is
5050 * port based.
5051 */
5052 curr_rpi_range = phba->sli4_hba.next_rpi -
5053 phba->sli4_hba.max_cfg_param.rpi_base;
4916 spin_unlock_irq(&phba->hbalock); 5054 spin_unlock_irq(&phba->hbalock);
4917 5055
4918 /* 5056 /*
@@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4925 else 5063 else
4926 rpi_count = LPFC_RPI_HDR_COUNT; 5064 rpi_count = LPFC_RPI_HDR_COUNT;
4927 5065
5066 if (!rpi_count)
5067 return NULL;
4928 /* 5068 /*
4929 * First allocate the protocol header region for the port. The 5069 * First allocate the protocol header region for the port. The
4930 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 5070 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4957 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 5097 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4958 rpi_hdr->page_count = 1; 5098 rpi_hdr->page_count = 1;
4959 spin_lock_irq(&phba->hbalock); 5099 spin_lock_irq(&phba->hbalock);
4960 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 5100
5101 /* The rpi_hdr stores the logical index only. */
5102 rpi_hdr->start_rpi = curr_rpi_range;
4961 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 5103 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4962 5104
4963 /* 5105 /*
4964 * The next_rpi stores the next module-64 rpi value to post 5106 * The next_rpi stores the next logical module-64 rpi value used
4965 * in any subsequent rpi memory region postings. 5107 * to post physical rpis in subsequent rpi postings.
4966 */ 5108 */
4967 phba->sli4_hba.next_rpi += rpi_count; 5109 phba->sli4_hba.next_rpi += rpi_count;
4968 spin_unlock_irq(&phba->hbalock); 5110 spin_unlock_irq(&phba->hbalock);
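Per the comments added above, RPI header blocks now store a zero-based logical index while the physical RPI stays port based. A tiny sketch of that mapping with made-up values; the helper name is illustrative only.

#include <stdint.h>
#include <stdio.h>

static uint16_t phys_rpi(uint16_t rpi_base, uint16_t logical_rpi)
{
	/* Physical RPI = port's rpi_base + logical (zero-based) index. */
	return rpi_base + logical_rpi;
}

int main(void)
{
	uint16_t rpi_base = 0x100;	/* hypothetical port base */

	/* A header block's start_rpi of 64 maps to physical 0x140. */
	printf("logical 64 -> physical 0x%x\n", phys_rpi(rpi_base, 64));
	return 0;
}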
@@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4981 * @phba: pointer to lpfc hba data structure. 5123 * @phba: pointer to lpfc hba data structure.
4982 * 5124 *
4983 * This routine is invoked to remove all memory resources allocated 5125 * This routine is invoked to remove all memory resources allocated
4984 * to support rpis. This routine presumes the caller has released all 5126 * to support rpis for SLI4 ports not supporting extents. This routine
4985 * rpis consumed by fabric or port logins and is prepared to have 5127 * presumes the caller has released all rpis consumed by fabric or port
4986 * the header pages removed. 5128 * logins and is prepared to have the header pages removed.
4987 **/ 5129 **/
4988void 5130void
4989lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 5131lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4990{ 5132{
4991 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 5133 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4992 5134
5135 if (!phba->sli4_hba.rpi_hdrs_in_use)
5136 goto exit;
5137
4993 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 5138 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4994 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 5139 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4995 list_del(&rpi_hdr->list); 5140 list_del(&rpi_hdr->list);
@@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4998 kfree(rpi_hdr->dmabuf); 5143 kfree(rpi_hdr->dmabuf);
4999 kfree(rpi_hdr); 5144 kfree(rpi_hdr);
5000 } 5145 }
5001 5146 exit:
5002 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5147 /* There are no rpis available to the port now. */
5003 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 5148 phba->sli4_hba.next_rpi = 0;
5004} 5149}
5005 5150
5006/** 5151/**
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5487 /* Final checks. The port status should be clean. */ 5632 /* Final checks. The port status should be clean. */
5488 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 5633 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5489 &reg_data.word0) || 5634 &reg_data.word0) ||
5490 bf_get(lpfc_sliport_status_err, &reg_data)) { 5635 (bf_get(lpfc_sliport_status_err, &reg_data) &&
5636 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
5491 phba->work_status[0] = 5637 phba->work_status[0] =
5492 readl(phba->sli4_hba.u.if_type2. 5638 readl(phba->sli4_hba.u.if_type2.
5493 ERR1regaddr); 5639 ERR1regaddr);
@@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5741{ 5887{
5742 LPFC_MBOXQ_t *pmb; 5888 LPFC_MBOXQ_t *pmb;
5743 struct lpfc_mbx_read_config *rd_config; 5889 struct lpfc_mbx_read_config *rd_config;
5744 uint32_t rc = 0; 5890 union lpfc_sli4_cfg_shdr *shdr;
5891 uint32_t shdr_status, shdr_add_status;
5892 struct lpfc_mbx_get_func_cfg *get_func_cfg;
5893 struct lpfc_rsrc_desc_fcfcoe *desc;
5894 uint32_t desc_count;
5895 int length, i, rc = 0;
5745 5896
5746 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5897 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5747 if (!pmb) { 5898 if (!pmb) {
@@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5763 rc = -EIO; 5914 rc = -EIO;
5764 } else { 5915 } else {
5765 rd_config = &pmb->u.mqe.un.rd_config; 5916 rd_config = &pmb->u.mqe.un.rd_config;
5917 phba->sli4_hba.extents_in_use =
5918 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
5766 phba->sli4_hba.max_cfg_param.max_xri = 5919 phba->sli4_hba.max_cfg_param.max_xri =
5767 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5920 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5768 phba->sli4_hba.max_cfg_param.xri_base = 5921 phba->sli4_hba.max_cfg_param.xri_base =
@@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5781 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5934 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5782 phba->sli4_hba.max_cfg_param.max_fcfi = 5935 phba->sli4_hba.max_cfg_param.max_fcfi =
5783 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5936 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5784 phba->sli4_hba.max_cfg_param.fcfi_base =
5785 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5786 phba->sli4_hba.max_cfg_param.max_eq = 5937 phba->sli4_hba.max_cfg_param.max_eq =
5787 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5938 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5788 phba->sli4_hba.max_cfg_param.max_rq = 5939 phba->sli4_hba.max_cfg_param.max_rq =
@@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5800 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 5951 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5801 phba->max_vports = phba->max_vpi; 5952 phba->max_vports = phba->max_vpi;
5802 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5953 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5803 "2003 cfg params XRI(B:%d M:%d), " 5954 "2003 cfg params Extents? %d "
5955 "XRI(B:%d M:%d), "
5804 "VPI(B:%d M:%d) " 5956 "VPI(B:%d M:%d) "
5805 "VFI(B:%d M:%d) " 5957 "VFI(B:%d M:%d) "
5806 "RPI(B:%d M:%d) " 5958 "RPI(B:%d M:%d) "
5807 "FCFI(B:%d M:%d)\n", 5959 "FCFI(Count:%d)\n",
5960 phba->sli4_hba.extents_in_use,
5808 phba->sli4_hba.max_cfg_param.xri_base, 5961 phba->sli4_hba.max_cfg_param.xri_base,
5809 phba->sli4_hba.max_cfg_param.max_xri, 5962 phba->sli4_hba.max_cfg_param.max_xri,
5810 phba->sli4_hba.max_cfg_param.vpi_base, 5963 phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5813 phba->sli4_hba.max_cfg_param.max_vfi, 5966 phba->sli4_hba.max_cfg_param.max_vfi,
5814 phba->sli4_hba.max_cfg_param.rpi_base, 5967 phba->sli4_hba.max_cfg_param.rpi_base,
5815 phba->sli4_hba.max_cfg_param.max_rpi, 5968 phba->sli4_hba.max_cfg_param.max_rpi,
5816 phba->sli4_hba.max_cfg_param.fcfi_base,
5817 phba->sli4_hba.max_cfg_param.max_fcfi); 5969 phba->sli4_hba.max_cfg_param.max_fcfi);
5818 } 5970 }
5819 mempool_free(pmb, phba->mbox_mem_pool); 5971
5972 if (rc)
5973 goto read_cfg_out;
5820 5974
5821 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 5975 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5822 if (phba->cfg_hba_queue_depth > 5976 if (phba->cfg_hba_queue_depth >
@@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5825 phba->cfg_hba_queue_depth = 5979 phba->cfg_hba_queue_depth =
5826 phba->sli4_hba.max_cfg_param.max_xri - 5980 phba->sli4_hba.max_cfg_param.max_xri -
5827 lpfc_sli4_get_els_iocb_cnt(phba); 5981 lpfc_sli4_get_els_iocb_cnt(phba);
5982
5983 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
5984 LPFC_SLI_INTF_IF_TYPE_2)
5985 goto read_cfg_out;
5986
5987 /* get the pf# and vf# for SLI4 if_type 2 port */
5988 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
5989 sizeof(struct lpfc_sli4_cfg_mhdr));
5990 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
5991 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
5992 length, LPFC_SLI4_MBX_EMBED);
5993
5994 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5995 shdr = (union lpfc_sli4_cfg_shdr *)
5996 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
5997 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5998 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5999 if (rc || shdr_status || shdr_add_status) {
6000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6001 "3026 Mailbox failed , mbxCmd x%x "
6002 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6003 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6004 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6005 rc = -EIO;
6006 goto read_cfg_out;
6007 }
6008
 6009 /* search for fc_fcoe resource descriptor */
6010 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6011 desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6012
6013 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6014 desc = (struct lpfc_rsrc_desc_fcfcoe *)
6015 &get_func_cfg->func_cfg.desc[i];
6016 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6017 bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
6018 phba->sli4_hba.iov.pf_number =
6019 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6020 phba->sli4_hba.iov.vf_number =
6021 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6022 break;
6023 }
6024 }
6025
6026 if (i < LPFC_RSRC_DESC_MAX_NUM)
6027 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6028 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6029 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6030 phba->sli4_hba.iov.vf_number);
6031 else {
6032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6033 "3028 GET_FUNCTION_CONFIG: failed to find "
6034 "Resrouce Descriptor:x%x\n",
6035 LPFC_RSRC_DESC_TYPE_FCFCOE);
6036 rc = -EIO;
6037 }
6038
6039read_cfg_out:
6040 mempool_free(pmb, phba->mbox_mem_pool);
5828 return rc; 6041 return rc;
5829} 6042}
5830 6043
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6229 phba->sli4_hba.mbx_cq = NULL; 6442 phba->sli4_hba.mbx_cq = NULL;
6230 6443
6231 /* Release FCP response complete queue */ 6444 /* Release FCP response complete queue */
6232 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6445 fcp_qidx = 0;
6446 do
6233 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6447 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6448 while (++fcp_qidx < phba->cfg_fcp_eq_count);
6234 kfree(phba->sli4_hba.fcp_cq); 6449 kfree(phba->sli4_hba.fcp_cq);
6235 phba->sli4_hba.fcp_cq = NULL; 6450 phba->sli4_hba.fcp_cq = NULL;
6236 6451
@@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6353 phba->sli4_hba.sp_eq->queue_id); 6568 phba->sli4_hba.sp_eq->queue_id);
6354 6569
6355 /* Set up fast-path FCP Response Complete Queue */ 6570 /* Set up fast-path FCP Response Complete Queue */
6356 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6571 fcp_cqidx = 0;
6572 do {
6357 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6573 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6359 "0526 Fast-path FCP CQ (%d) not " 6575 "0526 Fast-path FCP CQ (%d) not "
6360 "allocated\n", fcp_cqidx); 6576 "allocated\n", fcp_cqidx);
6361 goto out_destroy_fcp_cq; 6577 goto out_destroy_fcp_cq;
6362 } 6578 }
6363 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6579 if (phba->cfg_fcp_eq_count)
6364 phba->sli4_hba.fp_eq[fcp_cqidx], 6580 rc = lpfc_cq_create(phba,
6365 LPFC_WCQ, LPFC_FCP); 6581 phba->sli4_hba.fcp_cq[fcp_cqidx],
6582 phba->sli4_hba.fp_eq[fcp_cqidx],
6583 LPFC_WCQ, LPFC_FCP);
6584 else
6585 rc = lpfc_cq_create(phba,
6586 phba->sli4_hba.fcp_cq[fcp_cqidx],
6587 phba->sli4_hba.sp_eq,
6588 LPFC_WCQ, LPFC_FCP);
6366 if (rc) { 6589 if (rc) {
6367 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6590 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6368 "0527 Failed setup of fast-path FCP " 6591 "0527 Failed setup of fast-path FCP "
@@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6371 } 6594 }
6372 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6595 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6373 "2588 FCP CQ setup: cq[%d]-id=%d, " 6596 "2588 FCP CQ setup: cq[%d]-id=%d, "
6374 "parent eq[%d]-id=%d\n", 6597 "parent %seq[%d]-id=%d\n",
6375 fcp_cqidx, 6598 fcp_cqidx,
6376 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6599 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6600 (phba->cfg_fcp_eq_count) ? "" : "sp_",
6377 fcp_cqidx, 6601 fcp_cqidx,
6378 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 6602 (phba->cfg_fcp_eq_count) ?
6379 } 6603 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
6604 phba->sli4_hba.sp_eq->queue_id);
6605 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6380 6606
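
Illustration (not part of the patch): the for loop above becomes a do/while so the body executes at least once even when cfg_fcp_eq_count is 0; in that single-EQ case the FCP completion queue is parented to the slow-path EQ rather than a dedicated fast-path EQ. A minimal standalone C sketch of that selection logic, with made-up queue types:

    #include <stdio.h>

    struct eq { int id; };   /* stand-in for the driver's queue handle */

    /* Pick the parent EQ for FCP CQ 'idx': a dedicated fast-path EQ when
     * fcp_eq_count > 0, otherwise the shared slow-path EQ. */
    static struct eq *pick_parent_eq(struct eq *fp_eq, int fcp_eq_count,
                                     struct eq *sp_eq, int idx)
    {
        return fcp_eq_count ? &fp_eq[idx] : sp_eq;
    }

    int main(void)
    {
        struct eq fp_eq[4] = { {10}, {11}, {12}, {13} };
        struct eq sp_eq = { 99 };
        int fcp_eq_count = 0;   /* e.g. INTx/MSI mode: no fast-path EQs */
        int idx = 0;

        /* do/while mirrors the patch: at least one CQ is always handled */
        do {
            printf("cq[%d] -> parent eq %d\n", idx,
                   pick_parent_eq(fp_eq, fcp_eq_count, &sp_eq, idx)->id);
        } while (++idx < fcp_eq_count);
        return 0;
    }
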
6381 /* 6607 /*
6382 * Set up all the Work Queues (WQs) 6608 * Set up all the Work Queues (WQs)
@@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6445 fcp_cq_index, 6671 fcp_cq_index,
6446 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6672 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6447 /* Round robin FCP Work Queue's Completion Queue assignment */ 6673 /* Round robin FCP Work Queue's Completion Queue assignment */
6448 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 6674 if (phba->cfg_fcp_eq_count)
6675 fcp_cq_index = ((fcp_cq_index + 1) %
6676 phba->cfg_fcp_eq_count);
6449 } 6677 }
6450 6678
6451 /* 6679 /*
@@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
6827 if (rdy_chk < 1000) 7055 if (rdy_chk < 1000)
6828 break; 7056 break;
6829 } 7057 }
7058 /* delay driver action following IF_TYPE_2 function reset */
7059 msleep(100);
6830 break; 7060 break;
6831 case LPFC_SLI_INTF_IF_TYPE_1: 7061 case LPFC_SLI_INTF_IF_TYPE_1:
6832 default: 7062 default:
@@ -7419,11 +7649,15 @@ enable_msix_vectors:
7419 /* 7649 /*
7420 * Assign MSI-X vectors to interrupt handlers 7650 * Assign MSI-X vectors to interrupt handlers
7421 */ 7651 */
7422 7652 if (vectors > 1)
7423 /* The first vector must associated to slow-path handler for MQ */ 7653 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7424 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 7654 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7425 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 7655 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7426 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7656 else
7657 /* All Interrupts need to be handled by one EQ */
7658 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7659 &lpfc_sli4_intr_handler, IRQF_SHARED,
7660 LPFC_DRIVER_NAME, phba);
7427 if (rc) { 7661 if (rc) {
7428 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7662 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7429 "0485 MSI-X slow-path request_irq failed " 7663 "0485 MSI-X slow-path request_irq failed "
@@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7765{ 7999{
7766 int wait_cnt = 0; 8000 int wait_cnt = 0;
7767 LPFC_MBOXQ_t *mboxq; 8001 LPFC_MBOXQ_t *mboxq;
8002 struct pci_dev *pdev = phba->pcidev;
7768 8003
7769 lpfc_stop_hba_timers(phba); 8004 lpfc_stop_hba_timers(phba);
7770 phba->sli4_hba.intr_enable = 0; 8005 phba->sli4_hba.intr_enable = 0;
@@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7804 /* Disable PCI subsystem interrupt */ 8039 /* Disable PCI subsystem interrupt */
7805 lpfc_sli4_disable_intr(phba); 8040 lpfc_sli4_disable_intr(phba);
7806 8041
8042 /* Disable SR-IOV if enabled */
8043 if (phba->cfg_sriov_nr_virtfn)
8044 pci_disable_sriov(pdev);
8045
7807 /* Stop kthread signal shall trigger work_done one more time */ 8046 /* Stop kthread signal shall trigger work_done one more time */
7808 kthread_stop(phba->worker_thread); 8047 kthread_stop(phba->worker_thread);
7809 8048
@@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7878 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 8117 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7879 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 8118 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7880 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 8119 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8120
8121 /* Make sure that sge_supp_len can be handled by the driver */
8122 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8123 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8124
7881 return rc; 8125 return rc;
7882} 8126}
7883 8127
@@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7902 int length; 8146 int length;
7903 struct lpfc_sli4_parameters *mbx_sli4_parameters; 8147 struct lpfc_sli4_parameters *mbx_sli4_parameters;
7904 8148
8149 /*
8150 * By default, the driver assumes the SLI4 port requires RPI
8151 * header postings. The SLI4_PARAM response will correct this
8152 * assumption.
8153 */
8154 phba->sli4_hba.rpi_hdrs_in_use = 1;
8155
7905 /* Read the port's SLI4 Config Parameters */ 8156 /* Read the port's SLI4 Config Parameters */
7906 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 8157 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
7907 sizeof(struct lpfc_sli4_cfg_mhdr)); 8158 sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7938 mbx_sli4_parameters); 8189 mbx_sli4_parameters);
7939 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 8190 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
7940 mbx_sli4_parameters); 8191 mbx_sli4_parameters);
8192 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8193 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8194
8195 /* Make sure that sge_supp_len can be handled by the driver */
8196 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8197 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8198
7941 return 0; 8199 return 0;
7942} 8200}
7943 8201
@@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8173 8431
8174 lpfc_debugfs_terminate(vport); 8432 lpfc_debugfs_terminate(vport);
8175 8433
8434 /* Disable SR-IOV if enabled */
8435 if (phba->cfg_sriov_nr_virtfn)
8436 pci_disable_sriov(pdev);
8437
8176 /* Disable interrupt */ 8438 /* Disable interrupt */
8177 lpfc_sli_disable_intr(phba); 8439 lpfc_sli_disable_intr(phba);
8178 8440
@@ -8565,6 +8827,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8565} 8827}
8566 8828
8567/** 8829/**
8830 * lpfc_write_firmware - attempt to write a firmware image to the port
8831 * @phba: pointer to lpfc hba data structure.
8832 * @fw: pointer to firmware image returned from request_firmware.
8833 *
8834 * returns the number of bytes written if write is successful.
8835 * returns a negative error value if there were errors.
8836 * returns 0 if firmware matches currently active firmware on port.
8837 **/
8838int
8839lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8840{
8841 char fwrev[32];
8842 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
8843 struct list_head dma_buffer_list;
8844 int i, rc = 0;
8845 struct lpfc_dmabuf *dmabuf, *next;
8846 uint32_t offset = 0, temp_offset = 0;
8847
8848 INIT_LIST_HEAD(&dma_buffer_list);
8849 if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
8850 (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
8851 (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
8852 (image->size != fw->size)) {
8853 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8854 "3022 Invalid FW image found. "
8855 "Magic:%d Type:%x ID:%x\n",
8856 image->magic_number,
8857 bf_get(lpfc_grp_hdr_file_type, image),
8858 bf_get(lpfc_grp_hdr_id, image));
8859 return -EINVAL;
8860 }
8861 lpfc_decode_firmware_rev(phba, fwrev, 1);
8862 if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
8863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8864 "3023 Updating Firmware. Current Version:%s "
8865 "New Version:%s\n",
8866 fwrev, image->rev_name);
8867 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
8868 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
8869 GFP_KERNEL);
8870 if (!dmabuf) {
8871 rc = -ENOMEM;
8872 goto out;
8873 }
8874 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8875 SLI4_PAGE_SIZE,
8876 &dmabuf->phys,
8877 GFP_KERNEL);
8878 if (!dmabuf->virt) {
8879 kfree(dmabuf);
8880 rc = -ENOMEM;
8881 goto out;
8882 }
8883 list_add_tail(&dmabuf->list, &dma_buffer_list);
8884 }
8885 while (offset < fw->size) {
8886 temp_offset = offset;
8887 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
8888 if (offset + SLI4_PAGE_SIZE > fw->size) {
8889 temp_offset += fw->size - offset;
8890 memcpy(dmabuf->virt,
8891 fw->data + temp_offset,
8892 fw->size - offset);
8893 break;
8894 }
8895 temp_offset += SLI4_PAGE_SIZE;
8896 memcpy(dmabuf->virt, fw->data + temp_offset,
8897 SLI4_PAGE_SIZE);
8898 }
8899 rc = lpfc_wr_object(phba, &dma_buffer_list,
8900 (fw->size - offset), &offset);
8901 if (rc) {
8902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8903 "3024 Firmware update failed. "
8904 "%d\n", rc);
8905 goto out;
8906 }
8907 }
8908 rc = offset;
8909 }
8910out:
8911 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
8912 list_del(&dmabuf->list);
8913 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
8914 dmabuf->virt, dmabuf->phys);
8915 kfree(dmabuf);
8916 }
8917 return rc;
8918}
8919
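
Illustration (not part of the patch): lpfc_write_firmware() above validates the image header, then walks the image in groups of LPFC_MBX_WR_CONFIG_MAX_BDE DMA buffers of SLI4_PAGE_SIZE each, handing each group to lpfc_wr_object(), which is expected to advance offset until it reaches fw->size. A standalone sketch of just that chunking arithmetic, with illustrative constants:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SZ  4096   /* stand-in for SLI4_PAGE_SIZE */
    #define MAX_BDE  8      /* stand-in for the per-mailbox buffer count */

    /* Plan the writes the way the update loop does: at most MAX_BDE pages
     * per mailbox, with a short final page when the image is not a
     * multiple of PAGE_SZ. */
    static void plan_writes(size_t size)
    {
        size_t offset = 0;
        int mbox = 0;

        while (offset < size) {
            size_t chunk = 0;
            for (int i = 0; i < MAX_BDE && offset + chunk < size; i++)
                chunk += (size - (offset + chunk) < PAGE_SZ) ?
                          size - (offset + chunk) : PAGE_SZ;
            printf("mailbox %d writes %zu bytes at offset %zu\n",
                   ++mbox, chunk, offset);
            offset += chunk;
        }
    }

    int main(void)
    {
        plan_writes(100000);  /* ~98 KiB image -> three full groups + 1696 bytes */
        return 0;
    }
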
8920/**
8568 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 8921 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8569 * @pdev: pointer to PCI device 8922 * @pdev: pointer to PCI device
8570 * @pid: pointer to PCI device identifier 8923 * @pid: pointer to PCI device identifier
@@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8591 int error; 8944 int error;
8592 uint32_t cfg_mode, intr_mode; 8945 uint32_t cfg_mode, intr_mode;
8593 int mcnt; 8946 int mcnt;
8947 int adjusted_fcp_eq_count;
8948 int fcp_qidx;
8949 const struct firmware *fw;
8950 uint8_t file_name[16];
8594 8951
8595 /* Allocate memory for HBA structure */ 8952 /* Allocate memory for HBA structure */
8596 phba = lpfc_hba_alloc(pdev); 8953 phba = lpfc_hba_alloc(pdev);
@@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8688 error = -ENODEV; 9045 error = -ENODEV;
8689 goto out_free_sysfs_attr; 9046 goto out_free_sysfs_attr;
8690 } 9047 }
8691 /* Default to single FCP EQ for non-MSI-X */ 9048 /* Default to single EQ for non-MSI-X */
8692 if (phba->intr_type != MSIX) 9049 if (phba->intr_type != MSIX)
8693 phba->cfg_fcp_eq_count = 1; 9050 adjusted_fcp_eq_count = 0;
8694 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count) 9051 else if (phba->sli4_hba.msix_vec_nr <
8695 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 9052 phba->cfg_fcp_eq_count + 1)
9053 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9054 else
9055 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9056 /* Free unused EQs */
9057 for (fcp_qidx = adjusted_fcp_eq_count;
9058 fcp_qidx < phba->cfg_fcp_eq_count;
9059 fcp_qidx++) {
9060 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
9061 /* do not delete the first fcp_cq */
9062 if (fcp_qidx)
9063 lpfc_sli4_queue_free(
9064 phba->sli4_hba.fcp_cq[fcp_qidx]);
9065 }
9066 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
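
Illustration (not part of the patch): with MSI-X the driver keeps one vector for the slow path, so at most msix_vec_nr - 1 fast-path FCP EQs remain; without MSI-X the count drops to 0 and the surplus queues are freed above. A small standalone model of that adjustment, with example values:

    #include <stdbool.h>
    #include <stdio.h>

    /* One MSI-X vector is reserved for the slow path; the rest may back
     * fast-path FCP EQs. */
    static int adjust_fcp_eq_count(bool msix, int msix_vec_nr,
                                   int cfg_fcp_eq_count)
    {
        if (!msix)
            return 0;                    /* single shared EQ */
        if (msix_vec_nr < cfg_fcp_eq_count + 1)
            return msix_vec_nr - 1;      /* leave one vector for slow path */
        return cfg_fcp_eq_count;
    }

    int main(void)
    {
        /* e.g. 4 vectors granted but 8 FCP EQs requested -> keep 3 */
        printf("%d\n", adjust_fcp_eq_count(true, 4, 8));    /* prints 3 */
        printf("%d\n", adjust_fcp_eq_count(false, 0, 8));   /* prints 0 */
        return 0;
    }
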
8696 /* Set up SLI-4 HBA */ 9067 /* Set up SLI-4 HBA */
8697 if (lpfc_sli4_hba_setup(phba)) { 9068 if (lpfc_sli4_hba_setup(phba)) {
8698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8731 /* Perform post initialization setup */ 9102 /* Perform post initialization setup */
8732 lpfc_post_init_setup(phba); 9103 lpfc_post_init_setup(phba);
8733 9104
9105 /* check for firmware upgrade or downgrade */
9106 snprintf(file_name, 16, "%s.grp", phba->ModelName);
9107 error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9108 if (!error) {
9109 lpfc_write_firmware(phba, fw);
9110 release_firmware(fw);
9111 }
9112
8734 /* Check if there are static vports to be created. */ 9113 /* Check if there are static vports to be created. */
8735 lpfc_create_static_vport(phba); 9114 lpfc_create_static_vport(phba);
8736 9115
@@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
9498 PCI_ANY_ID, PCI_ANY_ID, }, 9877 PCI_ANY_ID, PCI_ANY_ID, },
9499 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, 9878 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
9500 PCI_ANY_ID, PCI_ANY_ID, }, 9879 PCI_ANY_ID, PCI_ANY_ID, },
9880 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
9881 PCI_ANY_ID, PCI_ANY_ID, },
9882 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
9883 PCI_ANY_ID, PCI_ANY_ID, },
9501 { 0 } 9884 { 0 }
9502}; 9885};
9503 9886
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e6ce9033f85e..556767028353 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
613 mb->un.varRdSparm.vpi = vpi + phba->vpi_base; 613 if (phba->sli_rev >= LPFC_SLI_REV3)
614 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
614 615
615 /* save address for completion */ 616 /* save address for completion */
616 pmb->context1 = mp; 617 pmb->context1 = mp;
@@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
643 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
644 645
645 mb->un.varUnregDID.did = did; 646 mb->un.varUnregDID.did = did;
646 if (vpi != 0xffff)
647 vpi += phba->vpi_base;
648 mb->un.varUnregDID.vpi = vpi; 647 mb->un.varUnregDID.vpi = vpi;
648 if ((vpi != 0xffff) &&
649 (phba->sli_rev == LPFC_SLI_REV4))
650 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
649 651
650 mb->mbxCommand = MBX_UNREG_D_ID; 652 mb->mbxCommand = MBX_UNREG_D_ID;
651 mb->mbxOwner = OWN_HOST; 653 mb->mbxOwner = OWN_HOST;
@@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
738 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 740 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
739 741
740 mb->un.varRegLogin.rpi = 0; 742 mb->un.varRegLogin.rpi = 0;
741 if (phba->sli_rev == LPFC_SLI_REV4) { 743 if (phba->sli_rev == LPFC_SLI_REV4)
742 mb->un.varRegLogin.rpi = rpi; 744 mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
743 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR) 745 if (phba->sli_rev >= LPFC_SLI_REV3)
744 return 1; 746 mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
745 }
746 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
747 mb->un.varRegLogin.did = did; 747 mb->un.varRegLogin.did = did;
748 mb->mbxOwner = OWN_HOST; 748 mb->mbxOwner = OWN_HOST;
749 /* Get a buffer to hold NPorts Service Parameters */ 749 /* Get a buffer to hold NPorts Service Parameters */
@@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, " 758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
759 "rpi x%x\n", vpi, did, rpi); 759 "rpi x%x\n", vpi, did, rpi);
760 return (1); 760 return 1;
761 } 761 }
762 INIT_LIST_HEAD(&mp->list); 762 INIT_LIST_HEAD(&mp->list);
763 sparam = mp->virt; 763 sparam = mp->virt;
@@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
773 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys); 773 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
774 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys); 774 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
775 775
776 return (0); 776 return 0;
777} 777}
778 778
779/** 779/**
@@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
789 * 789 *
790 * This routine prepares the mailbox command for unregistering remote port 790 * This routine prepares the mailbox command for unregistering remote port
791 * login. 791 * login.
792 *
793 * For SLI4 ports, the rpi passed to this function must be the physical
794 * rpi value, not the logical index.
792 **/ 795 **/
793void 796void
794lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, 797lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
@@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
799 mb = &pmb->u.mb; 802 mb = &pmb->u.mb;
800 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 803 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
801 804
802 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 805 mb->un.varUnregLogin.rpi = rpi;
803 mb->un.varUnregLogin.rsvd1 = 0; 806 mb->un.varUnregLogin.rsvd1 = 0;
804 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base; 807 if (phba->sli_rev >= LPFC_SLI_REV3)
808 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
805 809
806 mb->mbxCommand = MBX_UNREG_LOGIN; 810 mb->mbxCommand = MBX_UNREG_LOGIN;
807 mb->mbxOwner = OWN_HOST; 811 mb->mbxOwner = OWN_HOST;
@@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
825 829
826 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 830 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
827 if (mbox) { 831 if (mbox) {
828 lpfc_unreg_login(phba, vport->vpi, 832 /*
829 vport->vpi + phba->vpi_base, mbox); 833 * For SLI4 functions, the rpi field is overloaded for
830 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ; 834 * the vport context unreg all. This routine passes
835 * 0 for the rpi field in lpfc_unreg_login for compatibility
836 * with SLI3 and then overrides the rpi field with the
837 * expected value for SLI4.
838 */
839 lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
840 mbox);
841 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
831 mbox->vport = vport; 842 mbox->vport = vport;
832 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 843 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
833 mbox->context1 = NULL; 844 mbox->context1 = NULL;
@@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
865 if ((phba->sli_rev == LPFC_SLI_REV4) && 876 if ((phba->sli_rev == LPFC_SLI_REV4) &&
866 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) 877 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
867 mb->un.varRegVpi.upd = 1; 878 mb->un.varRegVpi.upd = 1;
868 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; 879
880 mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
869 mb->un.varRegVpi.sid = vport->fc_myDID; 881 mb->un.varRegVpi.sid = vport->fc_myDID;
870 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; 882 if (phba->sli_rev == LPFC_SLI_REV4)
883 mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
884 else
885 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
871 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname, 886 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
872 sizeof(struct lpfc_name)); 887 sizeof(struct lpfc_name));
873 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]); 888 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
@@ -901,10 +916,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
901 MAILBOX_t *mb = &pmb->u.mb; 916 MAILBOX_t *mb = &pmb->u.mb;
902 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 917 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
903 918
904 if (phba->sli_rev < LPFC_SLI_REV4) 919 if (phba->sli_rev == LPFC_SLI_REV3)
905 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; 920 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
906 else 921 else if (phba->sli_rev >= LPFC_SLI_REV4)
907 mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base; 922 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
908 923
909 mb->mbxCommand = MBX_UNREG_VPI; 924 mb->mbxCommand = MBX_UNREG_VPI;
910 mb->mbxOwner = OWN_HOST; 925 mb->mbxOwner = OWN_HOST;
@@ -1735,12 +1750,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1735 return length; 1750 return length;
1736 } 1751 }
1737 1752
1738 /* Setup for the none-embedded mbox command */ 1753 /* Setup for the non-embedded mbox command */
1739 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE; 1754 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
1740 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? 1755 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1741 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; 1756 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1742 /* Allocate record for keeping SGE virtual addresses */ 1757 /* Allocate record for keeping SGE virtual addresses */
1743 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), 1758 mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1744 GFP_KERNEL); 1759 GFP_KERNEL);
1745 if (!mbox->sge_array) { 1760 if (!mbox->sge_array) {
1746 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1761 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
@@ -1790,12 +1805,87 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1790 /* The sub-header is in DMA memory, which needs endian conversion */ 1805 /* The sub-header is in DMA memory, which needs endian conversion */
1791 if (cfg_shdr) 1806 if (cfg_shdr)
1792 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, 1807 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1793 sizeof(union lpfc_sli4_cfg_shdr)); 1808 sizeof(union lpfc_sli4_cfg_shdr));
1794
1795 return alloc_len; 1809 return alloc_len;
1796} 1810}
1797 1811
1798/** 1812/**
1813 * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
1814 * @phba: pointer to lpfc hba data structure.
1815 * @mbox: pointer to an allocated lpfc mbox resource.
1816 * @exts_count: the number of extents, if required, to allocate.
1817 * @rsrc_type: the resource extent type.
1818 * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
1819 *
1820 * This routine completes the subcommand header for SLI4 resource extent
1821 * mailbox commands. It is called after lpfc_sli4_config. The caller must
1822 * pass an allocated mailbox and the attributes required to initialize the
1823 * mailbox correctly.
1824 *
1825 * Return: 0 if the extent fields were initialized successfully, 1 on error.
1826 **/
1827int
1828lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1829 uint16_t exts_count, uint16_t rsrc_type, bool emb)
1830{
1831 uint8_t opcode = 0;
1832 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
1833 void *virtaddr = NULL;
1834
1835 /* Set up SLI4 ioctl command header fields */
1836 if (emb == LPFC_SLI4_MBX_NEMBED) {
1837 /* Get the first SGE entry from the non-embedded DMA memory */
1838 virtaddr = mbox->sge_array->addr[0];
1839 if (virtaddr == NULL)
1840 return 1;
1841 n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
1842 }
1843
1844 /*
1845 * The resource type is common to all extent Opcodes and resides in the
1846 * same position.
1847 */
1848 if (emb == LPFC_SLI4_MBX_EMBED)
1849 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1850 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1851 rsrc_type);
1852 else {
1853 /* This is DMA data. Byteswap is required. */
1854 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1855 n_rsrc_extnt, rsrc_type);
1856 lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
1857 &n_rsrc_extnt->word4,
1858 sizeof(uint32_t));
1859 }
1860
1861 /* Complete the initialization for the particular Opcode. */
1862 opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
1863 switch (opcode) {
1864 case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
1865 if (emb == LPFC_SLI4_MBX_EMBED)
1866 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1867 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1868 exts_count);
1869 else
1870 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1871 n_rsrc_extnt, exts_count);
1872 break;
1873 case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
1874 case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
1875 case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
1876 /* Initialization is complete.*/
1877 break;
1878 default:
1879 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1880 "2929 Resource Extent Opcode x%x is "
1881 "unsupported\n", opcode);
1882 return 1;
1883 }
1884
1885 return 0;
1886}
1887
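
Illustration (not part of the patch): a hedged usage fragment for the helper above, mirroring the GET_RSRC_EXTENT_INFO caller added later in this series (driver context assumed, error handling trimmed; the resource type is left as a parameter):

    /* Fragment only - assumes the usual lpfc driver headers and context. */
    static int query_extent_info(struct lpfc_hba *phba, uint16_t type)
    {
        LPFC_MBOXQ_t *mbox;
        uint32_t length;
        int rc;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
            return -ENOMEM;

        /* 1. Build the embedded SLI4 config header for the extent opcode. */
        length = sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
                 sizeof(struct lpfc_sli4_cfg_mhdr);
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
                         LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
                         length, LPFC_SLI4_MBX_EMBED);

        /* 2. Fill in the extent-specific fields; the extent count is
         *    ignored by the GET opcodes, so 0 is passed. */
        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
                                        LPFC_SLI4_MBX_EMBED);
        if (rc) {
            mempool_free(mbox, phba->mbox_mem_pool);
            return -EIO;
        }

        /* 3. Issue the mailbox; the caller then reads the response. */
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        mempool_free(mbox, phba->mbox_mem_pool);
        return rc ? -EIO : 0;
    }
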
1888/**
1799 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command 1889 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1800 * @phba: pointer to lpfc hba data structure. 1890 * @phba: pointer to lpfc hba data structure.
1801 * @mbox: pointer to lpfc mbox command. 1891 * @mbox: pointer to lpfc mbox command.
@@ -1939,9 +2029,12 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1939 bf_set(lpfc_init_vfi_vr, init_vfi, 1); 2029 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1940 bf_set(lpfc_init_vfi_vt, init_vfi, 1); 2030 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1941 bf_set(lpfc_init_vfi_vp, init_vfi, 1); 2031 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
1942 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base); 2032 bf_set(lpfc_init_vfi_vfi, init_vfi,
1943 bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base); 2033 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
1944 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); 2034 bf_set(lpfc_init_vpi_vpi, init_vfi,
2035 vport->phba->vpi_ids[vport->vpi]);
2036 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2037 vport->phba->fcf.fcfi);
1945} 2038}
1946 2039
1947/** 2040/**
@@ -1964,9 +2057,10 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1964 reg_vfi = &mbox->u.mqe.un.reg_vfi; 2057 reg_vfi = &mbox->u.mqe.un.reg_vfi;
1965 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); 2058 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
1966 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); 2059 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1967 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); 2060 bf_set(lpfc_reg_vfi_vfi, reg_vfi,
2061 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
1968 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); 2062 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1969 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); 2063 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
1970 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); 2064 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
1971 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); 2065 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
1972 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); 2066 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
@@ -1997,9 +2091,9 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1997 memset(mbox, 0, sizeof(*mbox)); 2091 memset(mbox, 0, sizeof(*mbox));
1998 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); 2092 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1999 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, 2093 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2000 vpi + phba->vpi_base); 2094 phba->vpi_ids[vpi]);
2001 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi, 2095 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2002 phba->pport->vfi + phba->vfi_base); 2096 phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2003} 2097}
2004 2098
2005/** 2099/**
@@ -2019,7 +2113,7 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2019 memset(mbox, 0, sizeof(*mbox)); 2113 memset(mbox, 0, sizeof(*mbox));
2020 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); 2114 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2021 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, 2115 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2022 vport->vfi + vport->phba->vfi_base); 2116 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2023} 2117}
2024 2118
2025/** 2119/**
@@ -2131,12 +2225,14 @@ lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2131void 2225void
2132lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) 2226lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2133{ 2227{
2228 struct lpfc_hba *phba = ndlp->phba;
2134 struct lpfc_mbx_resume_rpi *resume_rpi; 2229 struct lpfc_mbx_resume_rpi *resume_rpi;
2135 2230
2136 memset(mbox, 0, sizeof(*mbox)); 2231 memset(mbox, 0, sizeof(*mbox));
2137 resume_rpi = &mbox->u.mqe.un.resume_rpi; 2232 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2138 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); 2233 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2139 bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi); 2234 bf_set(lpfc_resume_rpi_index, resume_rpi,
2235 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2140 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); 2236 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2141 resume_rpi->event_tag = ndlp->phba->fc_eventTag; 2237 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2142} 2238}
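
Illustration (not part of the patch): the common thread in the lpfc_mbox.c hunks above is that VPI/VFI/RPI values handed to the firmware are no longer computed as logical index plus a base; they are looked up in per-type ID tables (phba->vpi_ids[], phba->sli4_hba.vfi_ids[], phba->sli4_hba.rpi_ids[]) that the extent code populates. A tiny standalone model of that indirection, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    /* Before: physical id derived arithmetically from a contiguous base. */
    static uint16_t phys_id_old(uint16_t logical, uint16_t base)
    {
        return logical + base;
    }

    /* After: physical id looked up in a table, so the ids granted by the
     * port may come from several non-contiguous extents. */
    static uint16_t phys_id_new(uint16_t logical, const uint16_t *ids)
    {
        return ids[logical];
    }

    int main(void)
    {
        /* e.g. two extents of 4 ids each, starting at 100 and 300 */
        uint16_t vpi_ids[8] = {100, 101, 102, 103, 300, 301, 302, 303};

        printf("old: %u\n", phys_id_old(5, 100));      /* 105: assumes contiguity */
        printf("new: %u\n", phys_id_new(5, vpi_ids));  /* 301: honours extents   */
        return 0;
    }
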
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cbb48ee8b0bb..10d5b5e41499 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -62,7 +62,6 @@ int
62lpfc_mem_alloc(struct lpfc_hba *phba, int align) 62lpfc_mem_alloc(struct lpfc_hba *phba, int align)
63{ 63{
64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
65 int longs;
66 int i; 65 int i;
67 66
68 if (phba->sli_rev == LPFC_SLI_REV4) 67 if (phba->sli_rev == LPFC_SLI_REV4)
@@ -138,17 +137,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
138 phba->lpfc_hrb_pool = NULL; 137 phba->lpfc_hrb_pool = NULL;
139 phba->lpfc_drb_pool = NULL; 138 phba->lpfc_drb_pool = NULL;
140 } 139 }
141 /* vpi zero is reserved for the physical port so add 1 to max */
142 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
143 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
144 if (!phba->vpi_bmask)
145 goto fail_free_dbq_pool;
146 140
147 return 0; 141 return 0;
148
149 fail_free_dbq_pool:
150 pci_pool_destroy(phba->lpfc_drb_pool);
151 phba->lpfc_drb_pool = NULL;
152 fail_free_hrb_pool: 142 fail_free_hrb_pool:
153 pci_pool_destroy(phba->lpfc_hrb_pool); 143 pci_pool_destroy(phba->lpfc_hrb_pool);
154 phba->lpfc_hrb_pool = NULL; 144 phba->lpfc_hrb_pool = NULL;
@@ -191,9 +181,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
191 int i; 181 int i;
192 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 182 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
193 183
194 /* Free VPI bitmask memory */
195 kfree(phba->vpi_bmask);
196
197 /* Free HBQ pools */ 184 /* Free HBQ pools */
198 lpfc_sli_hbqbuf_free_all(phba); 185 lpfc_sli_hbqbuf_free_all(phba);
199 if (phba->lpfc_drb_pool) 186 if (phba->lpfc_drb_pool)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0d92d4205ea6..2ddd02f7c603 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,11 +350,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
350 ndlp->nlp_maxframe = 350 ndlp->nlp_maxframe =
351 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; 351 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
352 352
353 /* 353 /* no need to reg_login if we are already in one of these states */
354 * Need to unreg_login if we are already in one of these states and
355 * change to NPR state. This will block the port until after the ACC
356 * completes and the reg_login is issued and completed.
357 */
358 switch (ndlp->nlp_state) { 354 switch (ndlp->nlp_state) {
359 case NLP_STE_NPR_NODE: 355 case NLP_STE_NPR_NODE:
360 if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) 356 if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -363,9 +359,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
363 case NLP_STE_PRLI_ISSUE: 359 case NLP_STE_PRLI_ISSUE:
364 case NLP_STE_UNMAPPED_NODE: 360 case NLP_STE_UNMAPPED_NODE:
365 case NLP_STE_MAPPED_NODE: 361 case NLP_STE_MAPPED_NODE:
366 lpfc_unreg_rpi(vport, ndlp); 362 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
367 ndlp->nlp_prev_state = ndlp->nlp_state; 363 return 1;
368 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
369 } 364 }
370 365
371 if ((vport->fc_flag & FC_PT2PT) && 366 if ((vport->fc_flag & FC_PT2PT) &&
@@ -657,6 +652,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
657 lpfc_unreg_rpi(vport, ndlp); 652 lpfc_unreg_rpi(vport, ndlp);
658 return 0; 653 return 0;
659} 654}
655
660/** 656/**
661 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd. 657 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
662 * @phba : Pointer to lpfc_hba structure. 658 * @phba : Pointer to lpfc_hba structure.
@@ -1399,8 +1395,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1399 if (mb->mbxStatus) { 1395 if (mb->mbxStatus) {
1400 /* RegLogin failed */ 1396 /* RegLogin failed */
1401 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1397 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1402 "0246 RegLogin failed Data: x%x x%x x%x\n", 1398 "0246 RegLogin failed Data: x%x x%x x%x x%x "
1403 did, mb->mbxStatus, vport->port_state); 1399 "x%x\n",
1400 did, mb->mbxStatus, vport->port_state,
1401 mb->un.varRegLogin.vpi,
1402 mb->un.varRegLogin.rpi);
1404 /* 1403 /*
1405 * If RegLogin failed due to lack of HBA resources do not 1404 * If RegLogin failed due to lack of HBA resources do not
1406 * retry discovery. 1405 * retry discovery.
@@ -1424,7 +1423,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1424 return ndlp->nlp_state; 1423 return ndlp->nlp_state;
1425 } 1424 }
1426 1425
1427 ndlp->nlp_rpi = mb->un.varWords[0]; 1426 /* SLI4 ports have preallocated logical rpis. */
1427 if (vport->phba->sli_rev < LPFC_SLI_REV4)
1428 ndlp->nlp_rpi = mb->un.varWords[0];
1429
1428 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1430 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1429 1431
1430 /* Only if we are not a fabric nport do we issue PRLI */ 1432 /* Only if we are not a fabric nport do we issue PRLI */
@@ -2025,7 +2027,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2025 MAILBOX_t *mb = &pmb->u.mb; 2027 MAILBOX_t *mb = &pmb->u.mb;
2026 2028
2027 if (!mb->mbxStatus) { 2029 if (!mb->mbxStatus) {
2028 ndlp->nlp_rpi = mb->un.varWords[0]; 2030 /* SLI4 ports have preallocated logical rpis. */
2031 if (vport->phba->sli_rev < LPFC_SLI_REV4)
2032 ndlp->nlp_rpi = mb->un.varWords[0];
2029 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 2033 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2030 } else { 2034 } else {
2031 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 2035 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 84e4481b2406..3ccc97496ebf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -743,7 +743,14 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
743 if (bcnt == 0) 743 if (bcnt == 0)
744 continue; 744 continue;
745 /* Now, post the SCSI buffer list sgls as a block */ 745 /* Now, post the SCSI buffer list sgls as a block */
746 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); 746 if (!phba->sli4_hba.extents_in_use)
747 status = lpfc_sli4_post_scsi_sgl_block(phba,
748 &sblist,
749 bcnt);
750 else
751 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
752 &sblist,
753 bcnt);
747 /* Reset SCSI buffer count for next round of posting */ 754 /* Reset SCSI buffer count for next round of posting */
748 bcnt = 0; 755 bcnt = 0;
749 while (!list_empty(&sblist)) { 756 while (!list_empty(&sblist)) {
@@ -787,7 +794,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
787 dma_addr_t pdma_phys_fcp_cmd; 794 dma_addr_t pdma_phys_fcp_cmd;
788 dma_addr_t pdma_phys_fcp_rsp; 795 dma_addr_t pdma_phys_fcp_rsp;
789 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; 796 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
790 uint16_t iotag, last_xritag = NO_XRI; 797 uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
791 int status = 0, index; 798 int status = 0, index;
792 int bcnt; 799 int bcnt;
793 int non_sequential_xri = 0; 800 int non_sequential_xri = 0;
@@ -823,13 +830,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
823 break; 830 break;
824 } 831 }
825 832
826 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba); 833 lxri = lpfc_sli4_next_xritag(phba);
827 if (psb->cur_iocbq.sli4_xritag == NO_XRI) { 834 if (lxri == NO_XRI) {
828 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 835 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
829 psb->data, psb->dma_handle); 836 psb->data, psb->dma_handle);
830 kfree(psb); 837 kfree(psb);
831 break; 838 break;
832 } 839 }
840 psb->cur_iocbq.sli4_lxritag = lxri;
841 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
833 if (last_xritag != NO_XRI 842 if (last_xritag != NO_XRI
834 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { 843 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
835 non_sequential_xri = 1; 844 non_sequential_xri = 1;
@@ -861,6 +870,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
861 */ 870 */
862 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 871 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
863 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 872 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
873 sgl->word2 = le32_to_cpu(sgl->word2);
864 bf_set(lpfc_sli4_sge_last, sgl, 0); 874 bf_set(lpfc_sli4_sge_last, sgl, 0);
865 sgl->word2 = cpu_to_le32(sgl->word2); 875 sgl->word2 = cpu_to_le32(sgl->word2);
866 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); 876 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
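
Illustration (not part of the patch): the added sgl->word2 = le32_to_cpu(sgl->word2) lines make the read/modify/write of the little-endian SGE word explicit: convert to CPU order, set the bit with bf_set(), convert back before the hardware sees it. A standalone version of the same pattern using glibc's <endian.h> helpers; the bit position is made up for the example:

    #include <endian.h>   /* le32toh/htole32 (glibc/BSD) */
    #include <stdint.h>
    #include <stdio.h>

    #define SGE_LAST_BIT (1u << 31)   /* illustrative bit position only */

    /* word2 is stored little-endian in memory, as the hardware expects. */
    static void set_last_sge(uint32_t *word2_le, int last)
    {
        uint32_t w = le32toh(*word2_le);   /* 1. convert to CPU order      */
        if (last)                          /* 2. edit the field            */
            w |= SGE_LAST_BIT;
        else
            w &= ~SGE_LAST_BIT;
        *word2_le = htole32(w);            /* 3. back to LE before posting */
    }

    int main(void)
    {
        uint32_t word2 = htole32(0);
        set_last_sge(&word2, 1);
        printf("word2 = 0x%08x\n", (unsigned)le32toh(word2));   /* 0x80000000 */
        return 0;
    }
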
@@ -869,6 +879,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
869 /* Setup the physical region for the FCP RSP */ 879 /* Setup the physical region for the FCP RSP */
870 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); 880 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
871 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); 881 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
882 sgl->word2 = le32_to_cpu(sgl->word2);
872 bf_set(lpfc_sli4_sge_last, sgl, 1); 883 bf_set(lpfc_sli4_sge_last, sgl, 1);
873 sgl->word2 = cpu_to_le32(sgl->word2); 884 sgl->word2 = cpu_to_le32(sgl->word2);
874 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); 885 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
@@ -914,7 +925,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
914 } 925 }
915 } 926 }
916 if (bcnt) { 927 if (bcnt) {
917 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); 928 if (!phba->sli4_hba.extents_in_use)
929 status = lpfc_sli4_post_scsi_sgl_block(phba,
930 &sblist,
931 bcnt);
932 else
933 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
934 &sblist,
935 bcnt);
936
937 if (status) {
938 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
939 "3021 SCSI SGL post error %d\n",
940 status);
941 bcnt = 0;
942 }
918 /* Reset SCSI buffer count for next round of posting */ 943 /* Reset SCSI buffer count for next round of posting */
919 while (!list_empty(&sblist)) { 944 while (!list_empty(&sblist)) {
920 list_remove_head(&sblist, psb, struct lpfc_scsi_buf, 945 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
@@ -2081,6 +2106,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2081 dma_len = sg_dma_len(sgel); 2106 dma_len = sg_dma_len(sgel);
2082 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2107 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2083 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2108 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2109 sgl->word2 = le32_to_cpu(sgl->word2);
2084 if ((num_bde + 1) == nseg) 2110 if ((num_bde + 1) == nseg)
2085 bf_set(lpfc_sli4_sge_last, sgl, 1); 2111 bf_set(lpfc_sli4_sge_last, sgl, 1);
2086 else 2112 else
@@ -2794,6 +2820,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2794 * of the scsi_cmnd request_buffer 2820 * of the scsi_cmnd request_buffer
2795 */ 2821 */
2796 piocbq->iocb.ulpContext = pnode->nlp_rpi; 2822 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2823 if (phba->sli_rev == LPFC_SLI_REV4)
2824 piocbq->iocb.ulpContext =
2825 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
2797 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 2826 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2798 piocbq->iocb.ulpFCP2Rcvy = 1; 2827 piocbq->iocb.ulpFCP2Rcvy = 1;
2799 else 2828 else
@@ -2807,7 +2836,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2807} 2836}
2808 2837
2809/** 2838/**
2810 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit 2839 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
2811 * @vport: The virtual port for which this call is being executed. 2840 * @vport: The virtual port for which this call is being executed.
2812 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2841 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2813 * @lun: Logical unit number. 2842 * @lun: Logical unit number.
@@ -2851,6 +2880,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2851 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 2880 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2852 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 2881 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2853 piocb->ulpContext = ndlp->nlp_rpi; 2882 piocb->ulpContext = ndlp->nlp_rpi;
2883 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
2884 piocb->ulpContext =
2885 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
2886 }
2854 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 2887 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2855 piocb->ulpFCP2Rcvy = 1; 2888 piocb->ulpFCP2Rcvy = 1;
2856 } 2889 }
@@ -3405,9 +3438,10 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3405 3438
3406 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3439 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3407 "0702 Issue %s to TGT %d LUN %d " 3440 "0702 Issue %s to TGT %d LUN %d "
3408 "rpi x%x nlp_flag x%x\n", 3441 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
3409 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 3442 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3410 pnode->nlp_rpi, pnode->nlp_flag); 3443 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
3444 iocbq->iocb_flag);
3411 3445
3412 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 3446 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3413 iocbq, iocbqrsp, lpfc_cmd->timeout); 3447 iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -3419,10 +3453,12 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3419 ret = FAILED; 3453 ret = FAILED;
3420 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 3454 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3421 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3455 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3422 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n", 3456 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
3457 "iocb_flag x%x\n",
3423 lpfc_taskmgmt_name(task_mgmt_cmd), 3458 lpfc_taskmgmt_name(task_mgmt_cmd),
3424 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, 3459 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3425 iocbqrsp->iocb.un.ulpWord[4]); 3460 iocbqrsp->iocb.un.ulpWord[4],
3461 iocbq->iocb_flag);
3426 } else if (status == IOCB_BUSY) 3462 } else if (status == IOCB_BUSY)
3427 ret = FAILED; 3463 ret = FAILED;
3428 else 3464 else
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fd5835e1c039..98999bbd8cbf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -65,6 +65,9 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
65 struct lpfc_iocbq *); 65 struct lpfc_iocbq *);
66static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 66static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67 struct hbq_dmabuf *); 67 struct hbq_dmabuf *);
68static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
69 struct lpfc_cqe *);
70
68static IOCB_t * 71static IOCB_t *
69lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 72lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
70{ 73{
@@ -456,7 +459,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
456 struct lpfc_iocbq * iocbq = NULL; 459 struct lpfc_iocbq * iocbq = NULL;
457 460
458 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 461 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
459
460 if (iocbq) 462 if (iocbq)
461 phba->iocb_cnt++; 463 phba->iocb_cnt++;
462 if (phba->iocb_cnt > phba->iocb_max) 464 if (phba->iocb_cnt > phba->iocb_max)
@@ -479,13 +481,10 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
479static struct lpfc_sglq * 481static struct lpfc_sglq *
480__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 482__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
481{ 483{
482 uint16_t adj_xri;
483 struct lpfc_sglq *sglq; 484 struct lpfc_sglq *sglq;
484 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; 485
485 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) 486 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
486 return NULL; 487 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
487 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
488 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
489 return sglq; 488 return sglq;
490} 489}
491 490
@@ -504,12 +503,9 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
504struct lpfc_sglq * 503struct lpfc_sglq *
505__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 504__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
506{ 505{
507 uint16_t adj_xri;
508 struct lpfc_sglq *sglq; 506 struct lpfc_sglq *sglq;
509 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; 507
510 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) 508 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
511 return NULL;
512 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
513 return sglq; 509 return sglq;
514} 510}
515 511
@@ -532,7 +528,6 @@ static int
532__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 528__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
533 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 529 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
534{ 530{
535 uint16_t adj_xri;
536 struct lpfc_node_rrq *rrq; 531 struct lpfc_node_rrq *rrq;
537 int empty; 532 int empty;
538 uint32_t did = 0; 533 uint32_t did = 0;
@@ -553,21 +548,19 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
553 /* 548 /*
554 * set the active bit even if there is no mem available. 549 * set the active bit even if there is no mem available.
555 */ 550 */
556 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
557
558 if (NLP_CHK_FREE_REQ(ndlp)) 551 if (NLP_CHK_FREE_REQ(ndlp))
559 goto out; 552 goto out;
560 553
561 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 554 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
562 goto out; 555 goto out;
563 556
564 if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) 557 if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
565 goto out; 558 goto out;
566 559
567 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
568 if (rrq) { 561 if (rrq) {
569 rrq->send_rrq = send_rrq; 562 rrq->send_rrq = send_rrq;
570 rrq->xritag = xritag; 563 rrq->xritag = phba->sli4_hba.xri_ids[xritag];
571 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
572 rrq->ndlp = ndlp; 565 rrq->ndlp = ndlp;
573 rrq->nlp_DID = ndlp->nlp_DID; 566 rrq->nlp_DID = ndlp->nlp_DID;
@@ -603,7 +596,6 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
603 uint16_t xritag, 596 uint16_t xritag,
604 struct lpfc_node_rrq *rrq) 597 struct lpfc_node_rrq *rrq)
605{ 598{
606 uint16_t adj_xri;
607 struct lpfc_nodelist *ndlp = NULL; 599 struct lpfc_nodelist *ndlp = NULL;
608 600
609 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) 601 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
@@ -619,8 +611,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
619 if (!ndlp) 611 if (!ndlp)
620 goto out; 612 goto out;
621 613
622 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; 614 if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
623 if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
624 rrq->send_rrq = 0; 615 rrq->send_rrq = 0;
625 rrq->xritag = 0; 616 rrq->xritag = 0;
626 rrq->rrq_stop_time = 0; 617 rrq->rrq_stop_time = 0;
@@ -796,12 +787,9 @@ int
796lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 787lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
797 uint16_t xritag) 788 uint16_t xritag)
798{ 789{
799 uint16_t adj_xri;
800
801 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
802 if (!ndlp) 790 if (!ndlp)
803 return 0; 791 return 0;
804 if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) 792 if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
805 return 1; 793 return 1;
806 else 794 else
807 return 0; 795 return 0;
@@ -841,7 +829,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
841 * @piocb: Pointer to the iocbq. 829 * @piocb: Pointer to the iocbq.
842 * 830 *
843 * This function is called with hbalock held. This function 831 * This function is called with hbalock held. This function
844 * Gets a new driver sglq object from the sglq list. If the 832 * gets a new driver sglq object from the sglq list. If the
845 * list is not empty then it is successful, it returns pointer to the newly 833 * list is not empty then it is successful, it returns pointer to the newly
846 * allocated sglq object else it returns NULL. 834 * allocated sglq object else it returns NULL.
847 **/ 835 **/
@@ -851,7 +839,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
851 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; 839 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
852 struct lpfc_sglq *sglq = NULL; 840 struct lpfc_sglq *sglq = NULL;
853 struct lpfc_sglq *start_sglq = NULL; 841 struct lpfc_sglq *start_sglq = NULL;
854 uint16_t adj_xri;
855 struct lpfc_scsi_buf *lpfc_cmd; 842 struct lpfc_scsi_buf *lpfc_cmd;
856 struct lpfc_nodelist *ndlp; 843 struct lpfc_nodelist *ndlp;
857 int found = 0; 844 int found = 0;
@@ -870,8 +857,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
870 while (!found) { 857 while (!found) {
871 if (!sglq) 858 if (!sglq)
872 return NULL; 859 return NULL;
873 adj_xri = sglq->sli4_xritag -
874 phba->sli4_hba.max_cfg_param.xri_base;
875 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { 860 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
876 /* This xri has an rrq outstanding for this DID. 861 /* This xri has an rrq outstanding for this DID.
877 * put it back in the list and get another xri. 862 * put it back in the list and get another xri.
@@ -888,7 +873,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
888 } 873 }
889 sglq->ndlp = ndlp; 874 sglq->ndlp = ndlp;
890 found = 1; 875 found = 1;
891 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 876 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
892 sglq->state = SGL_ALLOCATED; 877 sglq->state = SGL_ALLOCATED;
893 } 878 }
894 return sglq; 879 return sglq;
@@ -944,7 +929,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
944 if (iocbq->sli4_xritag == NO_XRI) 929 if (iocbq->sli4_xritag == NO_XRI)
945 sglq = NULL; 930 sglq = NULL;
946 else 931 else
947 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 932 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
933
948 if (sglq) { 934 if (sglq) {
949 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 935 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
950 (sglq->state != SGL_XRI_ABORTED)) { 936 (sglq->state != SGL_XRI_ABORTED)) {
@@ -971,6 +957,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
971 * Clean all volatile data fields, preserve iotag and node struct. 957 * Clean all volatile data fields, preserve iotag and node struct.
972 */ 958 */
973 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 959 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
960 iocbq->sli4_lxritag = NO_XRI;
974 iocbq->sli4_xritag = NO_XRI; 961 iocbq->sli4_xritag = NO_XRI;
975 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 962 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
976} 963}
@@ -2113,7 +2100,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2113 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2100 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2114 !pmb->u.mb.mbxStatus) { 2101 !pmb->u.mb.mbxStatus) {
2115 rpi = pmb->u.mb.un.varWords[0]; 2102 rpi = pmb->u.mb.un.varWords[0];
2116 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base; 2103 vpi = pmb->u.mb.un.varRegLogin.vpi;
2117 lpfc_unreg_login(phba, vpi, rpi, pmb); 2104 lpfc_unreg_login(phba, vpi, rpi, pmb);
2118 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2105 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2119 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2106 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -3881,8 +3868,10 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3881 list_del_init(&phba->sli4_hba.els_cq->list); 3868 list_del_init(&phba->sli4_hba.els_cq->list);
3882 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) 3869 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3883 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); 3870 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3884 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) 3871 qindx = 0;
3872 do
3885 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); 3873 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3874 while (++qindx < phba->cfg_fcp_eq_count);
3886 spin_unlock_irq(&phba->hbalock); 3875 spin_unlock_irq(&phba->hbalock);
3887 3876
3888 /* Now physically reset the device */ 3877 /* Now physically reset the device */
@@ -4318,6 +4307,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4318 continue; 4307 continue;
4319 } else if (rc) 4308 } else if (rc)
4320 break; 4309 break;
4310
4321 phba->link_state = LPFC_INIT_MBX_CMDS; 4311 phba->link_state = LPFC_INIT_MBX_CMDS;
4322 lpfc_config_port(phba, pmb); 4312 lpfc_config_port(phba, pmb);
4323 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4313 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -4421,7 +4411,8 @@ int
4421lpfc_sli_hba_setup(struct lpfc_hba *phba) 4411lpfc_sli_hba_setup(struct lpfc_hba *phba)
4422{ 4412{
4423 uint32_t rc; 4413 uint32_t rc;
4424 int mode = 3; 4414 int mode = 3, i;
4415 int longs;
4425 4416
4426 switch (lpfc_sli_mode) { 4417 switch (lpfc_sli_mode) {
4427 case 2: 4418 case 2:
@@ -4491,6 +4482,35 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4491 if (rc) 4482 if (rc)
4492 goto lpfc_sli_hba_setup_error; 4483 goto lpfc_sli_hba_setup_error;
4493 4484
4485 /* Initialize VPIs. */
4486 if (phba->sli_rev == LPFC_SLI_REV3) {
4487 /*
4488 * The VPI bitmask and physical ID array are allocated
4489 * and initialized once only - at driver load. A port
4490 * reset doesn't need to reinitialize this memory.
4491 */
4492 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4493 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4494 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4495 GFP_KERNEL);
4496 if (!phba->vpi_bmask) {
4497 rc = -ENOMEM;
4498 goto lpfc_sli_hba_setup_error;
4499 }
4500
4501 phba->vpi_ids = kzalloc(
4502 (phba->max_vpi+1) * sizeof(uint16_t),
4503 GFP_KERNEL);
4504 if (!phba->vpi_ids) {
4505 kfree(phba->vpi_bmask);
4506 rc = -ENOMEM;
4507 goto lpfc_sli_hba_setup_error;
4508 }
4509 for (i = 0; i < phba->max_vpi; i++)
4510 phba->vpi_ids[i] = i;
4511 }
4512 }
4513
4494 /* Init HBQs */ 4514 /* Init HBQs */
4495 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4515 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4496 rc = lpfc_sli_hbq_setup(phba); 4516 rc = lpfc_sli_hbq_setup(phba);
@@ -4677,9 +4697,11 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4677 4697
4678 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4698 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4679 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4699 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4680 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4700 fcp_eqidx = 0;
4701 do
4681 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4702 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4682 LPFC_QUEUE_REARM); 4703 LPFC_QUEUE_REARM);
4704 while (++fcp_eqidx < phba->cfg_fcp_eq_count);
4683 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4705 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4684 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4706 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4685 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4707 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -4687,6 +4709,803 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4687} 4709}
4688 4710
4689/** 4711/**
4712 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4713 * @phba: Pointer to HBA context object.
4714 * @type: The resource extent type.
4715 *
 4716 * This function reads the port's available extent count and extent size for the given resource type.
4717 **/
4718static int
4719lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4720 uint16_t *extnt_count, uint16_t *extnt_size)
4721{
4722 int rc = 0;
4723 uint32_t length;
4724 uint32_t mbox_tmo;
4725 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4726 LPFC_MBOXQ_t *mbox;
4727
4728 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4729 if (!mbox)
4730 return -ENOMEM;
4731
4732 /* Find out how many extents are available for this resource type */
4733 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
4734 sizeof(struct lpfc_sli4_cfg_mhdr));
4735 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4736 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
4737 length, LPFC_SLI4_MBX_EMBED);
4738
4739 /* Send an extents count of 0 - the GET doesn't use it. */
4740 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
4741 LPFC_SLI4_MBX_EMBED);
4742 if (unlikely(rc)) {
4743 rc = -EIO;
4744 goto err_exit;
4745 }
4746
4747 if (!phba->sli4_hba.intr_enable)
4748 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4749 else {
4750 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
4751 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4752 }
4753 if (unlikely(rc)) {
4754 rc = -EIO;
4755 goto err_exit;
4756 }
4757
4758 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
4759 if (bf_get(lpfc_mbox_hdr_status,
4760 &rsrc_info->header.cfg_shdr.response)) {
4761 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4762 "2930 Failed to get resource extents "
4763 "Status 0x%x Add'l Status 0x%x\n",
4764 bf_get(lpfc_mbox_hdr_status,
4765 &rsrc_info->header.cfg_shdr.response),
4766 bf_get(lpfc_mbox_hdr_add_status,
4767 &rsrc_info->header.cfg_shdr.response));
4768 rc = -EIO;
4769 goto err_exit;
4770 }
4771
4772 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
4773 &rsrc_info->u.rsp);
4774 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
4775 &rsrc_info->u.rsp);
4776 err_exit:
4777 mempool_free(mbox, phba->mbox_mem_pool);
4778 return rc;
4779}
4780
4781/**
4782 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
4783 * @phba: Pointer to HBA context object.
4784 * @type: The extent type to check.
4785 *
4786 * This function reads the current available extents from the port and checks
4787 * if the extent count or extent size has changed since the last access.
 4788 * Callers use this routine after a port reset to determine whether there
 4789 * is an extent reprovisioning requirement.
4790 *
4791 * Returns:
 4792 * -Error: a negative error code indicates a problem.
4793 * 1: Extent count or size has changed.
4794 * 0: No changes.
4795 **/
4796static int
4797lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
4798{
4799 uint16_t curr_ext_cnt, rsrc_ext_cnt;
4800 uint16_t size_diff, rsrc_ext_size;
4801 int rc = 0;
4802 struct lpfc_rsrc_blks *rsrc_entry;
4803 struct list_head *rsrc_blk_list = NULL;
4804
4805 size_diff = 0;
4806 curr_ext_cnt = 0;
4807 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
4808 &rsrc_ext_cnt,
4809 &rsrc_ext_size);
4810 if (unlikely(rc))
4811 return -EIO;
4812
4813 switch (type) {
4814 case LPFC_RSC_TYPE_FCOE_RPI:
4815 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
4816 break;
4817 case LPFC_RSC_TYPE_FCOE_VPI:
4818 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
4819 break;
4820 case LPFC_RSC_TYPE_FCOE_XRI:
4821 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
4822 break;
4823 case LPFC_RSC_TYPE_FCOE_VFI:
4824 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
4825 break;
4826 default:
4827 break;
4828 }
4829
4830 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
4831 curr_ext_cnt++;
4832 if (rsrc_entry->rsrc_size != rsrc_ext_size)
4833 size_diff++;
4834 }
4835
4836 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
4837 rc = 1;
4838
4839 return rc;
4840}
4841
4842/**
 4843 * lpfc_sli4_cfg_post_extnts - Issue the extent allocation request to the port.
4844 * @phba: Pointer to HBA context object.
 4845 * @extnt_cnt: number of available extents.
 4846 * @type: the extent type (rpi, xri, vfi, vpi).
 4847 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 4848 * @mbox: pointer to the caller's allocated mailbox structure.
4849 *
 4850 * This function executes the extent allocation request. It also
 4851 * sizes the mailbox (embedded or non-embedded) to hold the
 4852 * allocated extent ids. It is the caller's responsibility to
 4853 * evaluate the response.
4854 *
4855 * Returns:
4856 * -Error: Error value describes the condition found.
4857 * 0: if successful
4858 **/
4859static int
4860lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
4861 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
4862{
4863 int rc = 0;
4864 uint32_t req_len;
4865 uint32_t emb_len;
4866 uint32_t alloc_len, mbox_tmo;
4867
4868 /* Calculate the total requested length of the dma memory */
4869 req_len = *extnt_cnt * sizeof(uint16_t);
4870
4871 /*
4872 * Calculate the size of an embedded mailbox. The uint32_t
4873 * accounts for extents-specific word.
4874 */
4875 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
4876 sizeof(uint32_t);
4877
4878 /*
4879 * Presume the allocation and response will fit into an embedded
4880 * mailbox. If not true, reconfigure to a non-embedded mailbox.
4881 */
4882 *emb = LPFC_SLI4_MBX_EMBED;
4883 if (req_len > emb_len) {
4884 req_len = *extnt_cnt * sizeof(uint16_t) +
4885 sizeof(union lpfc_sli4_cfg_shdr) +
4886 sizeof(uint32_t);
4887 *emb = LPFC_SLI4_MBX_NEMBED;
4888 }
4889
4890 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4891 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
4892 req_len, *emb);
4893 if (alloc_len < req_len) {
4894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4895 "9000 Allocated DMA memory size (x%x) is "
4896 "less than the requested DMA memory "
4897 "size (x%x)\n", alloc_len, req_len);
4898 return -ENOMEM;
4899 }
4900 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
4901 if (unlikely(rc))
4902 return -EIO;
4903
4904 if (!phba->sli4_hba.intr_enable)
4905 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4906 else {
4907 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
4908 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4909 }
4910
4911 if (unlikely(rc))
4912 rc = -EIO;
4913 return rc;
4914}
4915
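The sizing rule used above - embed the request when the id payload fits in the mailbox, otherwise fall back to a non-embedded request that carries its own header overhead - can be sketched in isolation. This is only an illustrative sketch; the helper name and the byte constants are invented here and are not the real MAILBOX_t or mbox_header sizes.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative constants - assumptions, not the real SLI4 sizes. */
#define EXAMPLE_MBOX_BYTES   256u   /* assumed embedded payload budget */
#define EXAMPLE_HDR_BYTES     24u   /* assumed config header size      */

/*
 * Decide whether a request for extnt_cnt ids fits in the embedded
 * mailbox; if not, grow the request to also carry the config header
 * plus the extents-specific word, mirroring the driver logic above.
 */
static bool extent_req_is_embedded(uint16_t extnt_cnt, size_t *req_len)
{
	size_t emb_len = EXAMPLE_MBOX_BYTES - EXAMPLE_HDR_BYTES -
			 sizeof(uint32_t);

	*req_len = (size_t)extnt_cnt * sizeof(uint16_t);
	if (*req_len <= emb_len)
		return true;                 /* embedded mailbox is enough */

	*req_len += EXAMPLE_HDR_BYTES + sizeof(uint32_t);
	return false;                        /* use a non-embedded mailbox */
}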
4916/**
4917 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
4918 * @phba: Pointer to HBA context object.
4919 * @type: The resource extent type to allocate.
4920 *
4921 * This function allocates the number of elements for the specified
4922 * resource type.
4923 **/
4924static int
4925lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
4926{
4927 bool emb = false;
4928 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
4929 uint16_t rsrc_id, rsrc_start, j, k;
4930 uint16_t *ids;
4931 int i, rc;
4932 unsigned long longs;
4933 unsigned long *bmask;
4934 struct lpfc_rsrc_blks *rsrc_blks;
4935 LPFC_MBOXQ_t *mbox;
4936 uint32_t length;
4937 struct lpfc_id_range *id_array = NULL;
4938 void *virtaddr = NULL;
4939 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
4940 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
4941 struct list_head *ext_blk_list;
4942
4943 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
4944 &rsrc_cnt,
4945 &rsrc_size);
4946 if (unlikely(rc))
4947 return -EIO;
4948
4949 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
4950 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4951 "3009 No available Resource Extents "
4952 "for resource type 0x%x: Count: 0x%x, "
4953 "Size 0x%x\n", type, rsrc_cnt,
4954 rsrc_size);
4955 return -ENOMEM;
4956 }
4957
4958 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
4959 "2903 Available Resource Extents "
4960 "for resource type 0x%x: Count: 0x%x, "
4961 "Size 0x%x\n", type, rsrc_cnt,
4962 rsrc_size);
4963
4964 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4965 if (!mbox)
4966 return -ENOMEM;
4967
4968 rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
4969 if (unlikely(rc)) {
4970 rc = -EIO;
4971 goto err_exit;
4972 }
4973
4974 /*
4975 * Figure out where the response is located. Then get local pointers
 4976 * to the response data. The port does not guarantee to honor the
 4977 * full requested extent count, so update the local variable with
 4978 * the count actually allocated by the port.
4979 */
4980 if (emb == LPFC_SLI4_MBX_EMBED) {
4981 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
4982 id_array = &rsrc_ext->u.rsp.id[0];
4983 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
4984 } else {
4985 virtaddr = mbox->sge_array->addr[0];
4986 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
4987 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
4988 id_array = &n_rsrc->id;
4989 }
4990
4991 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4992 rsrc_id_cnt = rsrc_cnt * rsrc_size;
4993
4994 /*
4995 * Based on the resource size and count, correct the base and max
4996 * resource values.
4997 */
4998 length = sizeof(struct lpfc_rsrc_blks);
4999 switch (type) {
5000 case LPFC_RSC_TYPE_FCOE_RPI:
5001 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5002 sizeof(unsigned long),
5003 GFP_KERNEL);
5004 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5005 rc = -ENOMEM;
5006 goto err_exit;
5007 }
5008 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5009 sizeof(uint16_t),
5010 GFP_KERNEL);
5011 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5012 kfree(phba->sli4_hba.rpi_bmask);
5013 rc = -ENOMEM;
5014 goto err_exit;
5015 }
5016
5017 /*
5018 * The next_rpi was initialized with the maximum available
5019 * count but the port may allocate a smaller number. Catch
5020 * that case and update the next_rpi.
5021 */
5022 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5023
5024 /* Initialize local ptrs for common extent processing later. */
5025 bmask = phba->sli4_hba.rpi_bmask;
5026 ids = phba->sli4_hba.rpi_ids;
5027 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5028 break;
5029 case LPFC_RSC_TYPE_FCOE_VPI:
5030 phba->vpi_bmask = kzalloc(longs *
5031 sizeof(unsigned long),
5032 GFP_KERNEL);
5033 if (unlikely(!phba->vpi_bmask)) {
5034 rc = -ENOMEM;
5035 goto err_exit;
5036 }
5037 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5038 sizeof(uint16_t),
5039 GFP_KERNEL);
5040 if (unlikely(!phba->vpi_ids)) {
5041 kfree(phba->vpi_bmask);
5042 rc = -ENOMEM;
5043 goto err_exit;
5044 }
5045
5046 /* Initialize local ptrs for common extent processing later. */
5047 bmask = phba->vpi_bmask;
5048 ids = phba->vpi_ids;
5049 ext_blk_list = &phba->lpfc_vpi_blk_list;
5050 break;
5051 case LPFC_RSC_TYPE_FCOE_XRI:
5052 phba->sli4_hba.xri_bmask = kzalloc(longs *
5053 sizeof(unsigned long),
5054 GFP_KERNEL);
5055 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5056 rc = -ENOMEM;
5057 goto err_exit;
5058 }
5059 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5060 sizeof(uint16_t),
5061 GFP_KERNEL);
5062 if (unlikely(!phba->sli4_hba.xri_ids)) {
5063 kfree(phba->sli4_hba.xri_bmask);
5064 rc = -ENOMEM;
5065 goto err_exit;
5066 }
5067
5068 /* Initialize local ptrs for common extent processing later. */
5069 bmask = phba->sli4_hba.xri_bmask;
5070 ids = phba->sli4_hba.xri_ids;
5071 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5072 break;
5073 case LPFC_RSC_TYPE_FCOE_VFI:
5074 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5075 sizeof(unsigned long),
5076 GFP_KERNEL);
5077 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5078 rc = -ENOMEM;
5079 goto err_exit;
5080 }
5081 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5082 sizeof(uint16_t),
5083 GFP_KERNEL);
5084 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5085 kfree(phba->sli4_hba.vfi_bmask);
5086 rc = -ENOMEM;
5087 goto err_exit;
5088 }
5089
5090 /* Initialize local ptrs for common extent processing later. */
5091 bmask = phba->sli4_hba.vfi_bmask;
5092 ids = phba->sli4_hba.vfi_ids;
5093 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5094 break;
5095 default:
5096 /* Unsupported Opcode. Fail call. */
5097 id_array = NULL;
5098 bmask = NULL;
5099 ids = NULL;
5100 ext_blk_list = NULL;
5101 goto err_exit;
5102 }
5103
5104 /*
5105 * Complete initializing the extent configuration with the
5106 * allocated ids assigned to this function. The bitmask serves
5107 * as an index into the array and manages the available ids. The
5108 * array just stores the ids communicated to the port via the wqes.
5109 */
5110 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5111 if ((i % 2) == 0)
5112 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5113 &id_array[k]);
5114 else
5115 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5116 &id_array[k]);
5117
5118 rsrc_blks = kzalloc(length, GFP_KERNEL);
5119 if (unlikely(!rsrc_blks)) {
5120 rc = -ENOMEM;
5121 kfree(bmask);
5122 kfree(ids);
5123 goto err_exit;
5124 }
5125 rsrc_blks->rsrc_start = rsrc_id;
5126 rsrc_blks->rsrc_size = rsrc_size;
5127 list_add_tail(&rsrc_blks->list, ext_blk_list);
5128 rsrc_start = rsrc_id;
5129 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5130 phba->sli4_hba.scsi_xri_start = rsrc_start +
5131 lpfc_sli4_get_els_iocb_cnt(phba);
5132
5133 while (rsrc_id < (rsrc_start + rsrc_size)) {
5134 ids[j] = rsrc_id;
5135 rsrc_id++;
5136 j++;
5137 }
5138 /* Entire word processed. Get next word.*/
5139 if ((i % 2) == 1)
5140 k++;
5141 }
5142 err_exit:
5143 lpfc_sli4_mbox_cmd_free(phba, mbox);
5144 return rc;
5145}
5146
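Each case above sets up the same pair of structures: a bitmask that records which logical slots are in use and an array that maps every logical slot to the physical id the port returned. A minimal user-space sketch of that pairing follows; the names (idpool, idpool_init, idpool_get) are invented for illustration and are not part of the driver.

#include <stdlib.h>
#include <string.h>

struct idpool {
	unsigned long  *bmask;  /* one bit per logical slot, 1 = in use */
	unsigned short *ids;    /* logical index -> physical id         */
	unsigned int    count;  /* number of logical slots              */
};

static int idpool_init(struct idpool *p, const unsigned short *phys_ids,
		       unsigned int count)
{
	unsigned int bits_per_long = 8 * sizeof(unsigned long);
	unsigned int longs = (count + bits_per_long - 1) / bits_per_long;

	p->bmask = calloc(longs, sizeof(unsigned long));
	p->ids   = calloc(count, sizeof(unsigned short));
	if (!p->bmask || !p->ids) {
		free(p->bmask);
		free(p->ids);
		return -1;
	}
	memcpy(p->ids, phys_ids, count * sizeof(unsigned short));
	p->count = count;
	return 0;
}

/* Claim the first free logical slot; return its physical id, or -1 if full. */
static int idpool_get(struct idpool *p)
{
	unsigned int bits_per_long = 8 * sizeof(unsigned long);
	unsigned int i;

	for (i = 0; i < p->count; i++) {
		unsigned long *word = &p->bmask[i / bits_per_long];
		unsigned long  bit  = 1UL << (i % bits_per_long);

		if (!(*word & bit)) {
			*word |= bit;
			return p->ids[i];
		}
	}
	return -1;
}

The driver keeps the same split later in this patch: lpfc_sli4_alloc_xri() walks the bitmask for a free logical slot, and callers read the physical id out of xri_ids[] (or rpi_ids[], vpi_ids[], vfi_ids[]).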
5147/**
5148 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5149 * @phba: Pointer to HBA context object.
5150 * @type: the extent's type.
5151 *
5152 * This function deallocates all extents of a particular resource type.
5153 * SLI4 does not allow for deallocating a particular extent range. It
5154 * is the caller's responsibility to release all kernel memory resources.
5155 **/
5156static int
5157lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5158{
5159 int rc;
5160 uint32_t length, mbox_tmo = 0;
5161 LPFC_MBOXQ_t *mbox;
5162 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5163 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5164
5165 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5166 if (!mbox)
5167 return -ENOMEM;
5168
5169 /*
 5170 * This function sends an embedded mailbox because it only sends the
 5171 * resource type. All extents of this type are released by the
5172 * port.
5173 */
5174 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5175 sizeof(struct lpfc_sli4_cfg_mhdr));
5176 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5177 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5178 length, LPFC_SLI4_MBX_EMBED);
5179
5180 /* Send an extents count of 0 - the dealloc doesn't use it. */
5181 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5182 LPFC_SLI4_MBX_EMBED);
5183 if (unlikely(rc)) {
5184 rc = -EIO;
5185 goto out_free_mbox;
5186 }
5187 if (!phba->sli4_hba.intr_enable)
5188 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5189 else {
 5190 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5191 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5192 }
5193 if (unlikely(rc)) {
5194 rc = -EIO;
5195 goto out_free_mbox;
5196 }
5197
5198 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5199 if (bf_get(lpfc_mbox_hdr_status,
5200 &dealloc_rsrc->header.cfg_shdr.response)) {
5201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5202 "2919 Failed to release resource extents "
5203 "for type %d - Status 0x%x Add'l Status 0x%x. "
5204 "Resource memory not released.\n",
5205 type,
5206 bf_get(lpfc_mbox_hdr_status,
5207 &dealloc_rsrc->header.cfg_shdr.response),
5208 bf_get(lpfc_mbox_hdr_add_status,
5209 &dealloc_rsrc->header.cfg_shdr.response));
5210 rc = -EIO;
5211 goto out_free_mbox;
5212 }
5213
5214 /* Release kernel memory resources for the specific type. */
5215 switch (type) {
5216 case LPFC_RSC_TYPE_FCOE_VPI:
5217 kfree(phba->vpi_bmask);
5218 kfree(phba->vpi_ids);
5219 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5220 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5221 &phba->lpfc_vpi_blk_list, list) {
5222 list_del_init(&rsrc_blk->list);
5223 kfree(rsrc_blk);
5224 }
5225 break;
5226 case LPFC_RSC_TYPE_FCOE_XRI:
5227 kfree(phba->sli4_hba.xri_bmask);
5228 kfree(phba->sli4_hba.xri_ids);
5229 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5230 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5231 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5232 list_del_init(&rsrc_blk->list);
5233 kfree(rsrc_blk);
5234 }
5235 break;
5236 case LPFC_RSC_TYPE_FCOE_VFI:
5237 kfree(phba->sli4_hba.vfi_bmask);
5238 kfree(phba->sli4_hba.vfi_ids);
5239 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5240 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5241 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5242 list_del_init(&rsrc_blk->list);
5243 kfree(rsrc_blk);
5244 }
5245 break;
5246 case LPFC_RSC_TYPE_FCOE_RPI:
5247 /* RPI bitmask and physical id array are cleaned up earlier. */
5248 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5249 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5250 list_del_init(&rsrc_blk->list);
5251 kfree(rsrc_blk);
5252 }
5253 break;
5254 default:
5255 break;
5256 }
5257
5258 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5259
5260 out_free_mbox:
5261 mempool_free(mbox, phba->mbox_mem_pool);
5262 return rc;
5263}
5264
5265/**
5266 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5267 * @phba: Pointer to HBA context object.
5268 *
5269 * This function allocates all SLI4 resource identifiers.
5270 **/
5271int
5272lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5273{
5274 int i, rc, error = 0;
5275 uint16_t count, base;
5276 unsigned long longs;
5277
5278 if (phba->sli4_hba.extents_in_use) {
5279 /*
5280 * The port supports resource extents. The XRI, VPI, VFI, RPI
5281 * resource extent count must be read and allocated before
5282 * provisioning the resource id arrays.
5283 */
5284 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5285 LPFC_IDX_RSRC_RDY) {
5286 /*
5287 * Extent-based resources are set - the driver could
5288 * be in a port reset. Figure out if any corrective
5289 * actions need to be taken.
5290 */
5291 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5292 LPFC_RSC_TYPE_FCOE_VFI);
5293 if (rc != 0)
5294 error++;
5295 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5296 LPFC_RSC_TYPE_FCOE_VPI);
5297 if (rc != 0)
5298 error++;
5299 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5300 LPFC_RSC_TYPE_FCOE_XRI);
5301 if (rc != 0)
5302 error++;
5303 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5304 LPFC_RSC_TYPE_FCOE_RPI);
5305 if (rc != 0)
5306 error++;
5307
5308 /*
5309 * It's possible that the number of resources
5310 * provided to this port instance changed between
5311 * resets. Detect this condition and reallocate
5312 * resources. Otherwise, there is no action.
5313 */
5314 if (error) {
5315 lpfc_printf_log(phba, KERN_INFO,
5316 LOG_MBOX | LOG_INIT,
5317 "2931 Detected extent resource "
5318 "change. Reallocating all "
5319 "extents.\n");
5320 rc = lpfc_sli4_dealloc_extent(phba,
5321 LPFC_RSC_TYPE_FCOE_VFI);
5322 rc = lpfc_sli4_dealloc_extent(phba,
5323 LPFC_RSC_TYPE_FCOE_VPI);
5324 rc = lpfc_sli4_dealloc_extent(phba,
5325 LPFC_RSC_TYPE_FCOE_XRI);
5326 rc = lpfc_sli4_dealloc_extent(phba,
5327 LPFC_RSC_TYPE_FCOE_RPI);
5328 } else
5329 return 0;
5330 }
5331
5332 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5333 if (unlikely(rc))
5334 goto err_exit;
5335
5336 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5337 if (unlikely(rc))
5338 goto err_exit;
5339
5340 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5341 if (unlikely(rc))
5342 goto err_exit;
5343
5344 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5345 if (unlikely(rc))
5346 goto err_exit;
5347 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5348 LPFC_IDX_RSRC_RDY);
5349 return rc;
5350 } else {
5351 /*
5352 * The port does not support resource extents. The XRI, VPI,
5353 * VFI, RPI resource ids were determined from READ_CONFIG.
5354 * Just allocate the bitmasks and provision the resource id
5355 * arrays. If a port reset is active, the resources don't
5356 * need any action - just exit.
5357 */
5358 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5359 LPFC_IDX_RSRC_RDY)
5360 return 0;
5361
5362 /* RPIs. */
5363 count = phba->sli4_hba.max_cfg_param.max_rpi;
5364 base = phba->sli4_hba.max_cfg_param.rpi_base;
5365 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5366 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5367 sizeof(unsigned long),
5368 GFP_KERNEL);
5369 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5370 rc = -ENOMEM;
5371 goto err_exit;
5372 }
5373 phba->sli4_hba.rpi_ids = kzalloc(count *
5374 sizeof(uint16_t),
5375 GFP_KERNEL);
5376 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5377 rc = -ENOMEM;
5378 goto free_rpi_bmask;
5379 }
5380
5381 for (i = 0; i < count; i++)
5382 phba->sli4_hba.rpi_ids[i] = base + i;
5383
5384 /* VPIs. */
5385 count = phba->sli4_hba.max_cfg_param.max_vpi;
5386 base = phba->sli4_hba.max_cfg_param.vpi_base;
5387 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5388 phba->vpi_bmask = kzalloc(longs *
5389 sizeof(unsigned long),
5390 GFP_KERNEL);
5391 if (unlikely(!phba->vpi_bmask)) {
5392 rc = -ENOMEM;
5393 goto free_rpi_ids;
5394 }
5395 phba->vpi_ids = kzalloc(count *
5396 sizeof(uint16_t),
5397 GFP_KERNEL);
5398 if (unlikely(!phba->vpi_ids)) {
5399 rc = -ENOMEM;
5400 goto free_vpi_bmask;
5401 }
5402
5403 for (i = 0; i < count; i++)
5404 phba->vpi_ids[i] = base + i;
5405
5406 /* XRIs. */
5407 count = phba->sli4_hba.max_cfg_param.max_xri;
5408 base = phba->sli4_hba.max_cfg_param.xri_base;
5409 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5410 phba->sli4_hba.xri_bmask = kzalloc(longs *
5411 sizeof(unsigned long),
5412 GFP_KERNEL);
5413 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5414 rc = -ENOMEM;
5415 goto free_vpi_ids;
5416 }
5417 phba->sli4_hba.xri_ids = kzalloc(count *
5418 sizeof(uint16_t),
5419 GFP_KERNEL);
5420 if (unlikely(!phba->sli4_hba.xri_ids)) {
5421 rc = -ENOMEM;
5422 goto free_xri_bmask;
5423 }
5424
5425 for (i = 0; i < count; i++)
5426 phba->sli4_hba.xri_ids[i] = base + i;
5427
5428 /* VFIs. */
5429 count = phba->sli4_hba.max_cfg_param.max_vfi;
5430 base = phba->sli4_hba.max_cfg_param.vfi_base;
5431 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5432 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5433 sizeof(unsigned long),
5434 GFP_KERNEL);
5435 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5436 rc = -ENOMEM;
5437 goto free_xri_ids;
5438 }
5439 phba->sli4_hba.vfi_ids = kzalloc(count *
5440 sizeof(uint16_t),
5441 GFP_KERNEL);
5442 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5443 rc = -ENOMEM;
5444 goto free_vfi_bmask;
5445 }
5446
5447 for (i = 0; i < count; i++)
5448 phba->sli4_hba.vfi_ids[i] = base + i;
5449
5450 /*
5451 * Mark all resources ready. An HBA reset doesn't need
 5452 * to repeat this initialization.
5453 */
5454 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5455 LPFC_IDX_RSRC_RDY);
5456 return 0;
5457 }
5458
5459 free_vfi_bmask:
5460 kfree(phba->sli4_hba.vfi_bmask);
5461 free_xri_ids:
5462 kfree(phba->sli4_hba.xri_ids);
5463 free_xri_bmask:
5464 kfree(phba->sli4_hba.xri_bmask);
5465 free_vpi_ids:
5466 kfree(phba->vpi_ids);
5467 free_vpi_bmask:
5468 kfree(phba->vpi_bmask);
5469 free_rpi_ids:
5470 kfree(phba->sli4_hba.rpi_ids);
5471 free_rpi_bmask:
5472 kfree(phba->sli4_hba.rpi_bmask);
5473 err_exit:
5474 return rc;
5475}
5476
5477/**
5478 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5479 * @phba: Pointer to HBA context object.
5480 *
 5481 * This function releases all SLI4 resource identifiers allocated by
 5482 * the driver.
5483 **/
5484int
5485lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5486{
5487 if (phba->sli4_hba.extents_in_use) {
5488 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5489 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5490 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5491 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5492 } else {
5493 kfree(phba->vpi_bmask);
5494 kfree(phba->vpi_ids);
5495 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5496 kfree(phba->sli4_hba.xri_bmask);
5497 kfree(phba->sli4_hba.xri_ids);
5498 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5499 kfree(phba->sli4_hba.vfi_bmask);
5500 kfree(phba->sli4_hba.vfi_ids);
5501 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5502 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5503 }
5504
5505 return 0;
5506}
5507
5508/**
 4690 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 5509 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4691 * @phba: Pointer to HBA context object. 5510 * @phba: Pointer to HBA context object.
4692 * 5511 *
@@ -4708,10 +5527,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4708 struct lpfc_vport *vport = phba->pport; 5527 struct lpfc_vport *vport = phba->pport;
4709 struct lpfc_dmabuf *mp; 5528 struct lpfc_dmabuf *mp;
4710 5529
4711 /*
4712 * TODO: Why does this routine execute these task in a different
4713 * order from probe?
4714 */
4715 /* Perform a PCI function reset to start from clean */ 5530 /* Perform a PCI function reset to start from clean */
4716 rc = lpfc_pci_function_reset(phba); 5531 rc = lpfc_pci_function_reset(phba);
4717 if (unlikely(rc)) 5532 if (unlikely(rc))
@@ -4740,7 +5555,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4740 * to read FCoE param config regions 5555 * to read FCoE param config regions
4741 */ 5556 */
4742 if (lpfc_sli4_read_fcoe_params(phba, mboxq)) 5557 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4743 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5558 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
4744 "2570 Failed to read FCoE parameters\n"); 5559 "2570 Failed to read FCoE parameters\n");
4745 5560
4746 /* Issue READ_REV to collect vpd and FW information. */ 5561 /* Issue READ_REV to collect vpd and FW information. */
@@ -4873,6 +5688,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4873 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 5688 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4874 spin_unlock_irq(&phba->hbalock); 5689 spin_unlock_irq(&phba->hbalock);
4875 5690
5691 /*
 5692 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
 5693 * calls depend on these resources to complete port setup.
5694 */
5695 rc = lpfc_sli4_alloc_resource_identifiers(phba);
5696 if (rc) {
5697 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5698 "2920 Failed to alloc Resource IDs "
5699 "rc = x%x\n", rc);
5700 goto out_free_mbox;
5701 }
5702
4876 /* Read the port's service parameters. */ 5703 /* Read the port's service parameters. */
4877 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 5704 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
4878 if (rc) { 5705 if (rc) {
@@ -4906,35 +5733,37 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4906 goto out_free_mbox; 5733 goto out_free_mbox;
4907 } 5734 }
4908 5735
4909 if (phba->cfg_soft_wwnn) 5736 lpfc_update_vport_wwn(vport);
4910 u64_to_wwn(phba->cfg_soft_wwnn,
4911 vport->fc_sparam.nodeName.u.wwn);
4912 if (phba->cfg_soft_wwpn)
4913 u64_to_wwn(phba->cfg_soft_wwpn,
4914 vport->fc_sparam.portName.u.wwn);
4915 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4916 sizeof(struct lpfc_name));
4917 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4918 sizeof(struct lpfc_name));
4919 5737
4920 /* Update the fc_host data structures with new wwn. */ 5738 /* Update the fc_host data structures with new wwn. */
4921 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 5739 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4922 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 5740 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4923 5741
4924 /* Register SGL pool to the device using non-embedded mailbox command */ 5742 /* Register SGL pool to the device using non-embedded mailbox command */
4925 rc = lpfc_sli4_post_sgl_list(phba); 5743 if (!phba->sli4_hba.extents_in_use) {
4926 if (unlikely(rc)) { 5744 rc = lpfc_sli4_post_els_sgl_list(phba);
4927 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5745 if (unlikely(rc)) {
4928 "0582 Error %d during sgl post operation\n", 5746 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4929 rc); 5747 "0582 Error %d during els sgl post "
4930 rc = -ENODEV; 5748 "operation\n", rc);
4931 goto out_free_mbox; 5749 rc = -ENODEV;
5750 goto out_free_mbox;
5751 }
5752 } else {
5753 rc = lpfc_sli4_post_els_sgl_list_ext(phba);
5754 if (unlikely(rc)) {
5755 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5756 "2560 Error %d during els sgl post "
5757 "operation\n", rc);
5758 rc = -ENODEV;
5759 goto out_free_mbox;
5760 }
4932 } 5761 }
4933 5762
4934 /* Register SCSI SGL pool to the device */ 5763 /* Register SCSI SGL pool to the device */
4935 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 5764 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4936 if (unlikely(rc)) { 5765 if (unlikely(rc)) {
4937 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 5766 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4938 "0383 Error %d during scsi sgl post " 5767 "0383 Error %d during scsi sgl post "
4939 "operation\n", rc); 5768 "operation\n", rc);
4940 /* Some Scsi buffers were moved to the abort scsi list */ 5769 /* Some Scsi buffers were moved to the abort scsi list */
@@ -5747,10 +6576,15 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5747 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 6576 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5748 sizeof(struct lpfc_mcqe)); 6577 sizeof(struct lpfc_mcqe));
5749 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 6578 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5750 6579 /*
5751 /* Prefix the mailbox status with range x4000 to note SLI4 status. */ 6580 * When the CQE status indicates a failure and the mailbox status
6581 * indicates success then copy the CQE status into the mailbox status
6582 * (and prefix it with x4000).
6583 */
5752 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 6584 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5753 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); 6585 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
6586 bf_set(lpfc_mqe_status, mb,
6587 (LPFC_MBX_ERROR_RANGE | mcqe_status));
5754 rc = MBXERR_ERROR; 6588 rc = MBXERR_ERROR;
5755 } else 6589 } else
5756 lpfc_sli4_swap_str(phba, mboxq); 6590 lpfc_sli4_swap_str(phba, mboxq);
@@ -5819,7 +6653,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5819 else 6653 else
5820 rc = -EIO; 6654 rc = -EIO;
5821 if (rc != MBX_SUCCESS) 6655 if (rc != MBX_SUCCESS)
5822 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6656 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5823 "(%d):2541 Mailbox command x%x " 6657 "(%d):2541 Mailbox command x%x "
5824 "(x%x) cannot issue Data: x%x x%x\n", 6658 "(x%x) cannot issue Data: x%x x%x\n",
5825 mboxq->vport ? mboxq->vport->vpi : 0, 6659 mboxq->vport ? mboxq->vport->vpi : 0,
@@ -6307,6 +7141,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
6307 sgl->addr_hi = bpl->addrHigh; 7141 sgl->addr_hi = bpl->addrHigh;
6308 sgl->addr_lo = bpl->addrLow; 7142 sgl->addr_lo = bpl->addrLow;
6309 7143
7144 sgl->word2 = le32_to_cpu(sgl->word2);
6310 if ((i+1) == numBdes) 7145 if ((i+1) == numBdes)
6311 bf_set(lpfc_sli4_sge_last, sgl, 1); 7146 bf_set(lpfc_sli4_sge_last, sgl, 1);
6312 else 7147 else
@@ -6343,6 +7178,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
6343 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 7178 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
6344 sgl->addr_lo = 7179 sgl->addr_lo =
6345 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 7180 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
7181 sgl->word2 = le32_to_cpu(sgl->word2);
6346 bf_set(lpfc_sli4_sge_last, sgl, 1); 7182 bf_set(lpfc_sli4_sge_last, sgl, 1);
6347 sgl->word2 = cpu_to_le32(sgl->word2); 7183 sgl->word2 = cpu_to_le32(sgl->word2);
6348 sgl->sge_len = 7184 sgl->sge_len =
@@ -6474,7 +7310,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6474 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 7310 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
6475 >> LPFC_FIP_ELS_ID_SHIFT); 7311 >> LPFC_FIP_ELS_ID_SHIFT);
6476 } 7312 }
6477 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi); 7313 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
7314 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
6478 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 7315 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
6479 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 7316 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
6480 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 7317 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6623,14 +7460,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6623 iocbq->iocb.ulpContext); 7460 iocbq->iocb.ulpContext);
6624 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 7461 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
6625 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7462 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6626 iocbq->vport->vpi + phba->vpi_base); 7463 phba->vpi_ids[iocbq->vport->vpi]);
6627 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 7464 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
6628 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 7465 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
6629 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 7466 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
6630 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 7467 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
6631 LPFC_WQE_LENLOC_WORD3); 7468 LPFC_WQE_LENLOC_WORD3);
6632 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 7469 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6633 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi); 7470 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7471 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
6634 command_type = OTHER_COMMAND; 7472 command_type = OTHER_COMMAND;
6635 break; 7473 break;
6636 case CMD_CLOSE_XRI_CN: 7474 case CMD_CLOSE_XRI_CN:
@@ -6729,6 +7567,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6729 return IOCB_ERROR; 7567 return IOCB_ERROR;
6730 break; 7568 break;
6731 } 7569 }
7570
6732 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 7571 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
6733 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 7572 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
6734 wqe->generic.wqe_com.abort_tag = abort_tag; 7573 wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -6776,7 +7615,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6776 return IOCB_BUSY; 7615 return IOCB_BUSY;
6777 } 7616 }
6778 } else { 7617 } else {
6779 sglq = __lpfc_sli_get_sglq(phba, piocb); 7618 sglq = __lpfc_sli_get_sglq(phba, piocb);
6780 if (!sglq) { 7619 if (!sglq) {
6781 if (!(flag & SLI_IOCB_RET_IOCB)) { 7620 if (!(flag & SLI_IOCB_RET_IOCB)) {
6782 __lpfc_sli_ringtx_put(phba, 7621 __lpfc_sli_ringtx_put(phba,
@@ -6789,11 +7628,11 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6789 } 7628 }
6790 } 7629 }
6791 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 7630 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6792 sglq = NULL; /* These IO's already have an XRI and 7631 /* These IO's already have an XRI and a mapped sgl. */
6793 * a mapped sgl. 7632 sglq = NULL;
6794 */
6795 } else { 7633 } else {
 6796 /* This is a continuation of a command (CX), so this 7634 /*
 7635 * This is a continuation of a command (CX), so this
6797 * sglq is on the active list 7636 * sglq is on the active list
6798 */ 7637 */
6799 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); 7638 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
@@ -6802,8 +7641,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6802 } 7641 }
6803 7642
6804 if (sglq) { 7643 if (sglq) {
7644 piocb->sli4_lxritag = sglq->sli4_lxritag;
6805 piocb->sli4_xritag = sglq->sli4_xritag; 7645 piocb->sli4_xritag = sglq->sli4_xritag;
6806
6807 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 7646 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
6808 return IOCB_ERROR; 7647 return IOCB_ERROR;
6809 } 7648 }
@@ -9799,7 +10638,12 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
9799 break; 10638 break;
9800 case LPFC_WCQ: 10639 case LPFC_WCQ:
9801 while ((cqe = lpfc_sli4_cq_get(cq))) { 10640 while ((cqe = lpfc_sli4_cq_get(cq))) {
9802 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe); 10641 if (cq->subtype == LPFC_FCP)
10642 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
10643 cqe);
10644 else
10645 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
10646 cqe);
9803 if (!(++ecount % LPFC_GET_QE_REL_INT)) 10647 if (!(++ecount % LPFC_GET_QE_REL_INT))
9804 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 10648 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
9805 } 10649 }
@@ -11446,6 +12290,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11446 LPFC_MBOXQ_t *mbox; 12290 LPFC_MBOXQ_t *mbox;
11447 int rc; 12291 int rc;
11448 uint32_t shdr_status, shdr_add_status; 12292 uint32_t shdr_status, shdr_add_status;
12293 uint32_t mbox_tmo;
11449 union lpfc_sli4_cfg_shdr *shdr; 12294 union lpfc_sli4_cfg_shdr *shdr;
11450 12295
11451 if (xritag == NO_XRI) { 12296 if (xritag == NO_XRI) {
@@ -11479,8 +12324,10 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11479 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 12324 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
11480 if (!phba->sli4_hba.intr_enable) 12325 if (!phba->sli4_hba.intr_enable)
11481 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12326 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11482 else 12327 else {
11483 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 12328 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12329 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12330 }
11484 /* The IOCTL status is embedded in the mailbox subheader. */ 12331 /* The IOCTL status is embedded in the mailbox subheader. */
11485 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 12332 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
11486 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12333 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -11498,6 +12345,76 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11498} 12345}
11499 12346
11500/** 12347/**
 12348 * lpfc_sli4_alloc_xri - Allocate an available xri.
 12349 * @phba: pointer to lpfc hba data structure.
 12350 *
 12351 * This routine is invoked to allocate the next free logical xri from
 12352 * the driver's xri bitmask. The logical xri is used to index the
 12353 * xri_ids array, which holds the physical xri assigned by the port.
 12354 * The bitmask and the xri usage counters are updated under the
 12355 * hbalock.
 12356 *
 12357 * Return codes
 12358 * 	the allocated logical xri - successful
 12359 * 	NO_XRI - no xri is available.
 12360 */
12361uint16_t
12362lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
12363{
12364 unsigned long xri;
12365
12366 /*
12367 * Fetch the next logical xri. Because this index is logical,
12368 * the driver starts at 0 each time.
12369 */
12370 spin_lock_irq(&phba->hbalock);
12371 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
12372 phba->sli4_hba.max_cfg_param.max_xri, 0);
12373 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
12374 spin_unlock_irq(&phba->hbalock);
12375 return NO_XRI;
12376 } else {
12377 set_bit(xri, phba->sli4_hba.xri_bmask);
12378 phba->sli4_hba.max_cfg_param.xri_used++;
12379 phba->sli4_hba.xri_count++;
12380 }
12381
12382 spin_unlock_irq(&phba->hbalock);
12383 return xri;
12384}
12385
12386/**
 12387 * __lpfc_sli4_free_xri - Release an xri for reuse.
 12388 * @phba: pointer to lpfc hba data structure.
 12389 *
 12390 * This routine is invoked to release an xri to the pool of available
 12391 * xris maintained by the driver. The caller is expected to hold the hbalock.
12392 **/
12393void
12394__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
12395{
12396 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
12397 phba->sli4_hba.xri_count--;
12398 phba->sli4_hba.max_cfg_param.xri_used--;
12399 }
12400}
12401
12402/**
12403 * lpfc_sli4_free_xri - Release an xri for reuse.
12404 * @phba: pointer to lpfc hba data structure.
12405 *
 12406 * This routine is invoked to release an xri to the pool of
 12407 * available xris maintained by the driver.
12408 **/
12409void
12410lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
12411{
12412 spin_lock_irq(&phba->hbalock);
12413 __lpfc_sli4_free_xri(phba, xri);
12414 spin_unlock_irq(&phba->hbalock);
12415}
12416
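The __lpfc_sli4_free_xri()/lpfc_sli4_free_xri() pair above follows the usual locked-wrapper idiom: the double-underscore helper assumes the caller already holds the hbalock, while the public routine takes and drops the lock around it. A stand-alone sketch of the same idiom, with invented names and a pthread mutex in place of the spinlock:

#include <pthread.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int pool_free_count;

/* Caller must already hold pool_lock. */
static void __pool_put(void)
{
	pool_free_count++;
}

/* Public entry point: takes and releases the lock around the helper. */
static void pool_put(void)
{
	pthread_mutex_lock(&pool_lock);
	__pool_put();
	pthread_mutex_unlock(&pool_lock);
}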
12417/**
11501 * lpfc_sli4_next_xritag - Get an xritag for the io 12418 * lpfc_sli4_next_xritag - Get an xritag for the io
11502 * @phba: Pointer to HBA context object. 12419 * @phba: Pointer to HBA context object.
11503 * 12420 *
@@ -11510,30 +12427,23 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11510uint16_t 12427uint16_t
11511lpfc_sli4_next_xritag(struct lpfc_hba *phba) 12428lpfc_sli4_next_xritag(struct lpfc_hba *phba)
11512{ 12429{
11513 uint16_t xritag; 12430 uint16_t xri_index;
11514 12431
11515 spin_lock_irq(&phba->hbalock); 12432 xri_index = lpfc_sli4_alloc_xri(phba);
11516 xritag = phba->sli4_hba.next_xri; 12433 if (xri_index != NO_XRI)
11517 if ((xritag != (uint16_t) -1) && xritag < 12434 return xri_index;
11518 (phba->sli4_hba.max_cfg_param.max_xri 12435
11519 + phba->sli4_hba.max_cfg_param.xri_base)) { 12436 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11520 phba->sli4_hba.next_xri++;
11521 phba->sli4_hba.max_cfg_param.xri_used++;
11522 spin_unlock_irq(&phba->hbalock);
11523 return xritag;
11524 }
11525 spin_unlock_irq(&phba->hbalock);
11526 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11527 "2004 Failed to allocate XRI.last XRITAG is %d" 12437 "2004 Failed to allocate XRI.last XRITAG is %d"
11528 " Max XRI is %d, Used XRI is %d\n", 12438 " Max XRI is %d, Used XRI is %d\n",
11529 phba->sli4_hba.next_xri, 12439 xri_index,
11530 phba->sli4_hba.max_cfg_param.max_xri, 12440 phba->sli4_hba.max_cfg_param.max_xri,
11531 phba->sli4_hba.max_cfg_param.xri_used); 12441 phba->sli4_hba.max_cfg_param.xri_used);
11532 return -1; 12442 return NO_XRI;
11533} 12443}
11534 12444
11535/** 12445/**
11536 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware. 12446 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
11537 * @phba: pointer to lpfc hba data structure. 12447 * @phba: pointer to lpfc hba data structure.
11538 * 12448 *
11539 * This routine is invoked to post a block of driver's sgl pages to the 12449 * This routine is invoked to post a block of driver's sgl pages to the
@@ -11542,7 +12452,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
11542 * stopped. 12452 * stopped.
11543 **/ 12453 **/
11544int 12454int
11545lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) 12455lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
11546{ 12456{
11547 struct lpfc_sglq *sglq_entry; 12457 struct lpfc_sglq *sglq_entry;
11548 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 12458 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -11551,7 +12461,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11551 LPFC_MBOXQ_t *mbox; 12461 LPFC_MBOXQ_t *mbox;
11552 uint32_t reqlen, alloclen, pg_pairs; 12462 uint32_t reqlen, alloclen, pg_pairs;
11553 uint32_t mbox_tmo; 12463 uint32_t mbox_tmo;
11554 uint16_t xritag_start = 0; 12464 uint16_t xritag_start = 0, lxri = 0;
11555 int els_xri_cnt, rc = 0; 12465 int els_xri_cnt, rc = 0;
11556 uint32_t shdr_status, shdr_add_status; 12466 uint32_t shdr_status, shdr_add_status;
11557 union lpfc_sli4_cfg_shdr *shdr; 12467 union lpfc_sli4_cfg_shdr *shdr;
@@ -11568,11 +12478,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11568 return -ENOMEM; 12478 return -ENOMEM;
11569 } 12479 }
11570 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12480 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11571 if (!mbox) { 12481 if (!mbox)
11572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11573 "2560 Failed to allocate mbox cmd memory\n");
11574 return -ENOMEM; 12482 return -ENOMEM;
11575 }
11576 12483
11577 /* Allocate DMA memory and set up the non-embedded mailbox command */ 12484 /* Allocate DMA memory and set up the non-embedded mailbox command */
11578 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12485 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -11587,15 +12494,30 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11587 lpfc_sli4_mbox_cmd_free(phba, mbox); 12494 lpfc_sli4_mbox_cmd_free(phba, mbox);
11588 return -ENOMEM; 12495 return -ENOMEM;
11589 } 12496 }
11590 /* Get the first SGE entry from the non-embedded DMA memory */
11591 viraddr = mbox->sge_array->addr[0];
11592
11593 /* Set up the SGL pages in the non-embedded DMA pages */ 12497 /* Set up the SGL pages in the non-embedded DMA pages */
12498 viraddr = mbox->sge_array->addr[0];
11594 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 12499 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
11595 sgl_pg_pairs = &sgl->sgl_pg_pairs; 12500 sgl_pg_pairs = &sgl->sgl_pg_pairs;
11596 12501
11597 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 12502 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
11598 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 12503 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
12504
12505 /*
12506 * Assign the sglq a physical xri only if the driver has not
12507 * initialized those resources. A port reset only needs
12508 * the sglq's posted.
12509 */
12510 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
12511 LPFC_XRI_RSRC_RDY) {
12512 lxri = lpfc_sli4_next_xritag(phba);
12513 if (lxri == NO_XRI) {
12514 lpfc_sli4_mbox_cmd_free(phba, mbox);
12515 return -ENOMEM;
12516 }
12517 sglq_entry->sli4_lxritag = lxri;
12518 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
12519 }
12520
11599 /* Set up the sge entry */ 12521 /* Set up the sge entry */
11600 sgl_pg_pairs->sgl_pg0_addr_lo = 12522 sgl_pg_pairs->sgl_pg0_addr_lo =
11601 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 12523 cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -11605,16 +12527,17 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11605 cpu_to_le32(putPaddrLow(0)); 12527 cpu_to_le32(putPaddrLow(0));
11606 sgl_pg_pairs->sgl_pg1_addr_hi = 12528 sgl_pg_pairs->sgl_pg1_addr_hi =
11607 cpu_to_le32(putPaddrHigh(0)); 12529 cpu_to_le32(putPaddrHigh(0));
12530
11608 /* Keep the first xritag on the list */ 12531 /* Keep the first xritag on the list */
11609 if (pg_pairs == 0) 12532 if (pg_pairs == 0)
11610 xritag_start = sglq_entry->sli4_xritag; 12533 xritag_start = sglq_entry->sli4_xritag;
11611 sgl_pg_pairs++; 12534 sgl_pg_pairs++;
11612 } 12535 }
12536
12537 /* Complete initialization and perform endian conversion. */
11613 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 12538 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
11614 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 12539 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
11615 /* Perform endian conversion if necessary */
11616 sgl->word0 = cpu_to_le32(sgl->word0); 12540 sgl->word0 = cpu_to_le32(sgl->word0);
11617
11618 if (!phba->sli4_hba.intr_enable) 12541 if (!phba->sli4_hba.intr_enable)
11619 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12542 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11620 else { 12543 else {
@@ -11633,6 +12556,181 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11633 shdr_status, shdr_add_status, rc); 12556 shdr_status, shdr_add_status, rc);
11634 rc = -ENXIO; 12557 rc = -ENXIO;
11635 } 12558 }
12559
12560 if (rc == 0)
12561 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
12562 LPFC_XRI_RSRC_RDY);
12563 return rc;
12564}
12565
12566/**
12567 * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
12568 * @phba: pointer to lpfc hba data structure.
12569 *
12570 * This routine is invoked to post a block of driver's sgl pages to the
12571 * HBA using non-embedded mailbox command. No Lock is held. This routine
12572 * is only called when the driver is loading and after all IO has been
12573 * stopped.
12574 **/
12575int
12576lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
12577{
12578 struct lpfc_sglq *sglq_entry;
12579 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
12580 struct sgl_page_pairs *sgl_pg_pairs;
12581 void *viraddr;
12582 LPFC_MBOXQ_t *mbox;
12583 uint32_t reqlen, alloclen, index;
12584 uint32_t mbox_tmo;
12585 uint16_t rsrc_start, rsrc_size, els_xri_cnt;
12586 uint16_t xritag_start = 0, lxri = 0;
12587 struct lpfc_rsrc_blks *rsrc_blk;
12588 int cnt, ttl_cnt, rc = 0;
12589 int loop_cnt;
12590 uint32_t shdr_status, shdr_add_status;
12591 union lpfc_sli4_cfg_shdr *shdr;
12592
12593 /* The number of sgls to be posted */
12594 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
12595
12596 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
12597 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12598 if (reqlen > SLI4_PAGE_SIZE) {
12599 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12600 "2989 Block sgl registration required DMA "
12601 "size (%d) great than a page\n", reqlen);
12602 return -ENOMEM;
12603 }
12604
12605 cnt = 0;
12606 ttl_cnt = 0;
12607 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
12608 list) {
12609 rsrc_start = rsrc_blk->rsrc_start;
12610 rsrc_size = rsrc_blk->rsrc_size;
12611
12612 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12613 "3014 Working ELS Extent start %d, cnt %d\n",
12614 rsrc_start, rsrc_size);
12615
12616 loop_cnt = min(els_xri_cnt, rsrc_size);
12617 if (ttl_cnt + loop_cnt >= els_xri_cnt) {
12618 loop_cnt = els_xri_cnt - ttl_cnt;
12619 ttl_cnt = els_xri_cnt;
12620 }
12621
12622 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12623 if (!mbox)
12624 return -ENOMEM;
12625 /*
12626 * Allocate DMA memory and set up the non-embedded mailbox
12627 * command.
12628 */
12629 alloclen = lpfc_sli4_config(phba, mbox,
12630 LPFC_MBOX_SUBSYSTEM_FCOE,
12631 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
12632 reqlen, LPFC_SLI4_MBX_NEMBED);
12633 if (alloclen < reqlen) {
12634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12635 "2987 Allocated DMA memory size (%d) "
12636 "is less than the requested DMA memory "
12637 "size (%d)\n", alloclen, reqlen);
12638 lpfc_sli4_mbox_cmd_free(phba, mbox);
12639 return -ENOMEM;
12640 }
12641
12642 /* Set up the SGL pages in the non-embedded DMA pages */
12643 viraddr = mbox->sge_array->addr[0];
12644 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
12645 sgl_pg_pairs = &sgl->sgl_pg_pairs;
12646
12647 /*
12648 * The starting resource may not begin at zero. Control
 12649 * the loop variables via the block resource parameters,
12650 * but handle the sge pointers with a zero-based index
12651 * that doesn't get reset per loop pass.
12652 */
12653 for (index = rsrc_start;
12654 index < rsrc_start + loop_cnt;
12655 index++) {
12656 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
12657
12658 /*
12659 * Assign the sglq a physical xri only if the driver
12660 * has not initialized those resources. A port reset
12661 * only needs the sglq's posted.
12662 */
12663 if (bf_get(lpfc_xri_rsrc_rdy,
12664 &phba->sli4_hba.sli4_flags) !=
12665 LPFC_XRI_RSRC_RDY) {
12666 lxri = lpfc_sli4_next_xritag(phba);
12667 if (lxri == NO_XRI) {
12668 lpfc_sli4_mbox_cmd_free(phba, mbox);
12669 rc = -ENOMEM;
12670 goto err_exit;
12671 }
12672 sglq_entry->sli4_lxritag = lxri;
12673 sglq_entry->sli4_xritag =
12674 phba->sli4_hba.xri_ids[lxri];
12675 }
12676
12677 /* Set up the sge entry */
12678 sgl_pg_pairs->sgl_pg0_addr_lo =
12679 cpu_to_le32(putPaddrLow(sglq_entry->phys));
12680 sgl_pg_pairs->sgl_pg0_addr_hi =
12681 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
12682 sgl_pg_pairs->sgl_pg1_addr_lo =
12683 cpu_to_le32(putPaddrLow(0));
12684 sgl_pg_pairs->sgl_pg1_addr_hi =
12685 cpu_to_le32(putPaddrHigh(0));
12686
12687 /* Track the starting physical XRI for the mailbox. */
12688 if (index == rsrc_start)
12689 xritag_start = sglq_entry->sli4_xritag;
12690 sgl_pg_pairs++;
12691 cnt++;
12692 }
12693
12694 /* Complete initialization and perform endian conversion. */
12695 rsrc_blk->rsrc_used += loop_cnt;
12696 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
12697 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
12698 sgl->word0 = cpu_to_le32(sgl->word0);
12699
12700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12701 "3015 Post ELS Extent SGL, start %d, "
12702 "cnt %d, used %d\n",
12703 xritag_start, loop_cnt, rsrc_blk->rsrc_used);
12704 if (!phba->sli4_hba.intr_enable)
12705 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12706 else {
12707 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12708 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12709 }
12710 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
12711 shdr_status = bf_get(lpfc_mbox_hdr_status,
12712 &shdr->response);
12713 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12714 &shdr->response);
12715 if (rc != MBX_TIMEOUT)
12716 lpfc_sli4_mbox_cmd_free(phba, mbox);
12717 if (shdr_status || shdr_add_status || rc) {
12718 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12719 "2988 POST_SGL_BLOCK mailbox "
12720 "command failed status x%x "
12721 "add_status x%x mbx status x%x\n",
12722 shdr_status, shdr_add_status, rc);
12723 rc = -ENXIO;
12724 goto err_exit;
12725 }
12726 if (ttl_cnt >= els_xri_cnt)
12727 break;
12728 }
12729
12730 err_exit:
12731 if (rc == 0)
12732 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
12733 LPFC_XRI_RSRC_RDY);
11636 return rc; 12734 return rc;
11637} 12735}
11638 12736
@@ -11693,6 +12791,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
11693 lpfc_sli4_mbox_cmd_free(phba, mbox); 12791 lpfc_sli4_mbox_cmd_free(phba, mbox);
11694 return -ENOMEM; 12792 return -ENOMEM;
11695 } 12793 }
12794
11696 /* Get the first SGE entry from the non-embedded DMA memory */ 12795 /* Get the first SGE entry from the non-embedded DMA memory */
11697 viraddr = mbox->sge_array->addr[0]; 12796 viraddr = mbox->sge_array->addr[0];
11698 12797
@@ -11748,6 +12847,169 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
11748} 12847}
11749 12848
11750/** 12849/**
12850 * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
12851 * @phba: pointer to lpfc hba data structure.
12852 * @sblist: pointer to scsi buffer list.
12853 * @cnt: number of scsi buffers on the list.
12854 *
12855 * This routine is invoked to post a block of @cnt scsi sgl pages from a
12856 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
12857 * No lock is held.
12858 *
12859 **/
12860int
12861lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
12862 int cnt)
12863{
12864 struct lpfc_scsi_buf *psb = NULL;
12865 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
12866 struct sgl_page_pairs *sgl_pg_pairs;
12867 void *viraddr;
12868 LPFC_MBOXQ_t *mbox;
12869 uint32_t reqlen, alloclen, pg_pairs;
12870 uint32_t mbox_tmo;
12871 uint16_t xri_start = 0, scsi_xri_start;
12872 uint16_t rsrc_range;
12873 int rc = 0, avail_cnt;
12874 uint32_t shdr_status, shdr_add_status;
12875 dma_addr_t pdma_phys_bpl1;
12876 union lpfc_sli4_cfg_shdr *shdr;
12877 struct lpfc_rsrc_blks *rsrc_blk;
12878 uint32_t xri_cnt = 0;
12879
12880 /* Calculate the total requested length of the dma memory */
12881 reqlen = cnt * sizeof(struct sgl_page_pairs) +
12882 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12883 if (reqlen > SLI4_PAGE_SIZE) {
12884 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12885 "2932 Block sgl registration required DMA "
12886 "size (%d) greater than a page\n", reqlen);
12887 return -ENOMEM;
12888 }
12889
12890 /*
12891 * The use of extents requires the driver to post the sgl headers
12892 * in multiple postings to meet the contiguous resource assignment.
12893 */
12894 psb = list_prepare_entry(psb, sblist, list);
12895 scsi_xri_start = phba->sli4_hba.scsi_xri_start;
12896 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
12897 list) {
12898 rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
12899 if (rsrc_range < scsi_xri_start)
12900 continue;
12901 else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
12902 continue;
12903 else
12904 avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
12905
12906 reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
12907 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12908 /*
12909 * Allocate DMA memory and set up the non-embedded mailbox
12910 * command. The mbox is used to post an SGL page per loop
12911 * but the DMA memory has a use-once semantic so the mailbox
12912 * is used and freed per loop pass.
12913 */
12914 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12915 if (!mbox) {
12916 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12917 "2933 Failed to allocate mbox cmd "
12918 "memory\n");
12919 return -ENOMEM;
12920 }
12921 alloclen = lpfc_sli4_config(phba, mbox,
12922 LPFC_MBOX_SUBSYSTEM_FCOE,
12923 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
12924 reqlen,
12925 LPFC_SLI4_MBX_NEMBED);
12926 if (alloclen < reqlen) {
12927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12928 "2934 Allocated DMA memory size (%d) "
12929 "is less than the requested DMA memory "
12930 "size (%d)\n", alloclen, reqlen);
12931 lpfc_sli4_mbox_cmd_free(phba, mbox);
12932 return -ENOMEM;
12933 }
12934
12935 /* Get the first SGE entry from the non-embedded DMA memory */
12936 viraddr = mbox->sge_array->addr[0];
12937
12938 /* Set up the SGL pages in the non-embedded DMA pages */
12939 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
12940 sgl_pg_pairs = &sgl->sgl_pg_pairs;
12941
12942 /* pg_pairs tracks posted SGEs per loop iteration. */
12943 pg_pairs = 0;
12944 list_for_each_entry_continue(psb, sblist, list) {
12945 /* Set up the sge entry */
12946 sgl_pg_pairs->sgl_pg0_addr_lo =
12947 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
12948 sgl_pg_pairs->sgl_pg0_addr_hi =
12949 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
12950 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
12951 pdma_phys_bpl1 = psb->dma_phys_bpl +
12952 SGL_PAGE_SIZE;
12953 else
12954 pdma_phys_bpl1 = 0;
12955 sgl_pg_pairs->sgl_pg1_addr_lo =
12956 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
12957 sgl_pg_pairs->sgl_pg1_addr_hi =
12958 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
12959 /* Keep the first xri for this extent. */
12960 if (pg_pairs == 0)
12961 xri_start = psb->cur_iocbq.sli4_xritag;
12962 sgl_pg_pairs++;
12963 pg_pairs++;
12964 xri_cnt++;
12965
12966 /*
12967 * Track two exit conditions - the loop has constructed
12968 * all of the caller's SGE pairs or all available
12969 * resource IDs in this extent are consumed.
12970 */
12971 if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
12972 break;
12973 }
12974 rsrc_blk->rsrc_used += pg_pairs;
12975 bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
12976 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
12977
12978 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12979 "3016 Post SCSI Extent SGL, start %d, cnt %d "
12980 "blk use %d\n",
12981 xri_start, pg_pairs, rsrc_blk->rsrc_used);
12982 /* Perform endian conversion if necessary */
12983 sgl->word0 = cpu_to_le32(sgl->word0);
12984 if (!phba->sli4_hba.intr_enable)
12985 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12986 else {
12987 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12988 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12989 }
12990 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
12991 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12992 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12993 &shdr->response);
12994 if (rc != MBX_TIMEOUT)
12995 lpfc_sli4_mbox_cmd_free(phba, mbox);
12996 if (shdr_status || shdr_add_status || rc) {
12997 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12998 "2935 POST_SGL_BLOCK mailbox command "
12999 "failed status x%x add_status x%x "
13000 "mbx status x%x\n",
13001 shdr_status, shdr_add_status, rc);
13002 return -ENXIO;
13003 }
13004
13005 /* Post only what is requested. */
13006 if (xri_cnt >= cnt)
13007 break;
13008 }
13009 return rc;
13010}
13011
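As a rough illustration of the reqlen check at the top of lpfc_sli4_post_scsi_sgl_blk_ext above: the routine refuses to build a posting whose payload would exceed one SLI4 page, so the number of page pairs per mailbox is bounded by the page size minus the config header and word count. The standalone sketch below recomputes that bound with hypothetical sizes; the real struct layouts come from the lpfc headers, so treat these constants as placeholders only.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sizes standing in for the lpfc structures. */
#define SLI4_PAGE_SZ       4096u
#define CFG_SHDR_SZ          16u   /* stand-in for union lpfc_sli4_cfg_shdr */
#define SGL_PG_PAIR_SZ       16u   /* stand-in for struct sgl_page_pairs */

/* Payload length a posting of 'cnt' page pairs would need. */
static unsigned int post_sgl_reqlen(unsigned int cnt)
{
	return cnt * SGL_PG_PAIR_SZ + CFG_SHDR_SZ +
	       (unsigned int)sizeof(uint32_t);
}

/* Largest 'cnt' whose payload still fits in one SLI4 page. */
static unsigned int post_sgl_max_pairs(void)
{
	return (SLI4_PAGE_SZ - CFG_SHDR_SZ - (unsigned int)sizeof(uint32_t)) /
	       SGL_PG_PAIR_SZ;
}

int main(void)
{
	unsigned int max = post_sgl_max_pairs();

	printf("max page pairs per mailbox: %u\n", max);
	printf("reqlen(%u) = %u (fits in %u)\n", max,
	       post_sgl_reqlen(max), SLI4_PAGE_SZ);
	printf("reqlen(%u) = %u (rejected, exceeds a page)\n", max + 1,
	       post_sgl_reqlen(max + 1));
	return 0;
}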
13012/**
11751 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 13013 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
11752 * @phba: pointer to lpfc_hba struct that the frame was received on 13014 * @phba: pointer to lpfc_hba struct that the frame was received on
11753 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13015 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -12137,6 +13399,28 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
12137} 13399}
12138 13400
12139/** 13401/**
13402 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
13403 * @phba: Pointer to HBA context object.
13404 * @xri: xri id in transaction.
13405 *
13406 * This function validates that the xri maps to the known range of XRIs allocated and
13407 * used by the driver.
13408 **/
13409static uint16_t
13410lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
13411 uint16_t xri)
13412{
13413 int i;
13414
13415 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
13416 if (xri == phba->sli4_hba.xri_ids[i])
13417 return i;
13418 }
13419 return NO_XRI;
13420}
13421
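The helper above is a plain linear scan of the driver's physical-to-logical XRI map. A minimal userspace sketch of the same idea with a made-up table and sentinel (the real table is phba->sli4_hba.xri_ids[] and the real sentinel is the driver's NO_XRI):

#include <stdint.h>
#include <stdio.h>

#define NO_XRI 0xffff   /* sentinel for "not one of ours" */

/* Linear scan of a physical-to-logical id table; returns the logical
 * index of the physical xri, or NO_XRI when it is unknown. */
static uint16_t xri_inrange(const uint16_t *xri_ids, uint16_t max_xri,
			    uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < max_xri; i++)
		if (xri_ids[i] == xri)
			return i;
	return NO_XRI;
}

int main(void)
{
	/* Hypothetical physical ids handed out by the port. */
	uint16_t xri_ids[] = { 0x100, 0x101, 0x240, 0x241 };

	printf("0x240 -> %u\n", (unsigned)xri_inrange(xri_ids, 4, 0x240));
	printf("0x999 -> %#x\n", (unsigned)xri_inrange(xri_ids, 4, 0x999));
	return 0;
}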
13422
13423/**
12140 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 13424 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
12141 * @phba: Pointer to HBA context object. 13425 * @phba: Pointer to HBA context object.
12142 * @fc_hdr: pointer to a FC frame header. 13426 * @fc_hdr: pointer to a FC frame header.
@@ -12169,9 +13453,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
12169 "SID:x%x\n", oxid, sid); 13453 "SID:x%x\n", oxid, sid);
12170 return; 13454 return;
12171 } 13455 }
12172 if (rxid >= phba->sli4_hba.max_cfg_param.xri_base 13456 if (lpfc_sli4_xri_inrange(phba, rxid))
12173 && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
12174 + phba->sli4_hba.max_cfg_param.xri_base))
12175 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); 13457 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
12176 13458
12177 /* Allocate buffer for rsp iocb */ 13459 /* Allocate buffer for rsp iocb */
@@ -12194,12 +13476,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
12194 icmd->ulpBdeCount = 0; 13476 icmd->ulpBdeCount = 0;
12195 icmd->ulpLe = 1; 13477 icmd->ulpLe = 1;
12196 icmd->ulpClass = CLASS3; 13478 icmd->ulpClass = CLASS3;
12197 icmd->ulpContext = ndlp->nlp_rpi; 13479 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
12198 ctiocb->context1 = ndlp; 13480 ctiocb->context1 = ndlp;
12199 13481
12200 ctiocb->iocb_cmpl = NULL; 13482 ctiocb->iocb_cmpl = NULL;
12201 ctiocb->vport = phba->pport; 13483 ctiocb->vport = phba->pport;
12202 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 13484 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
13485 ctiocb->sli4_lxritag = NO_XRI;
12203 ctiocb->sli4_xritag = NO_XRI; 13486 ctiocb->sli4_xritag = NO_XRI;
12204 13487
12205 /* If the oxid maps to the FCP XRI range or if it is out of range, 13488 /* If the oxid maps to the FCP XRI range or if it is out of range,
@@ -12380,8 +13663,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
12380 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 13663 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
12381 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 13664 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
12382 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 13665 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
12383 first_iocbq->iocb.unsli3.rcvsli3.vpi = 13666 /* iocbq is prepped for internal consumption. Logical vpi. */
12384 vport->vpi + vport->phba->vpi_base; 13667 first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
12385 /* put the first buffer into the first IOCBq */ 13668 /* put the first buffer into the first IOCBq */
12386 first_iocbq->context2 = &seq_dmabuf->dbuf; 13669 first_iocbq->context2 = &seq_dmabuf->dbuf;
12387 first_iocbq->context3 = NULL; 13670 first_iocbq->context3 = NULL;
@@ -12461,7 +13744,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
12461 &phba->sli.ring[LPFC_ELS_RING], 13744 &phba->sli.ring[LPFC_ELS_RING],
12462 iocbq, fc_hdr->fh_r_ctl, 13745 iocbq, fc_hdr->fh_r_ctl,
12463 fc_hdr->fh_type)) 13746 fc_hdr->fh_type))
12464 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13747 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12465 "2540 Ring %d handler: unexpected Rctl " 13748 "2540 Ring %d handler: unexpected Rctl "
12466 "x%x Type x%x received\n", 13749 "x%x Type x%x received\n",
12467 LPFC_ELS_RING, 13750 LPFC_ELS_RING,
@@ -12558,9 +13841,24 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
12558{ 13841{
12559 struct lpfc_rpi_hdr *rpi_page; 13842 struct lpfc_rpi_hdr *rpi_page;
12560 uint32_t rc = 0; 13843 uint32_t rc = 0;
13844 uint16_t lrpi = 0;
13845
13846 /* SLI4 ports that support extents do not require RPI headers. */
13847 if (!phba->sli4_hba.rpi_hdrs_in_use)
13848 goto exit;
13849 if (phba->sli4_hba.extents_in_use)
13850 return -EIO;
12561 13851
12562 /* Post all rpi memory regions to the port. */
12563 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 13852 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
13853 /*
13854 * Assign the rpi headers a physical rpi only if the driver
13855 * has not initialized those resources. A port reset only
13856 * needs the headers posted.
13857 */
13858 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
13859 LPFC_RPI_RSRC_RDY)
13860 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
13861
12564 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 13862 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
12565 if (rc != MBX_SUCCESS) { 13863 if (rc != MBX_SUCCESS) {
12566 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13864 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12571,6 +13869,9 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
12571 } 13869 }
12572 } 13870 }
12573 13871
13872 exit:
13873 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
13874 LPFC_RPI_RSRC_RDY);
12574 return rc; 13875 return rc;
12575} 13876}
12576 13877
@@ -12594,10 +13895,15 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12594 LPFC_MBOXQ_t *mboxq; 13895 LPFC_MBOXQ_t *mboxq;
12595 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 13896 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
12596 uint32_t rc = 0; 13897 uint32_t rc = 0;
12597 uint32_t mbox_tmo;
12598 uint32_t shdr_status, shdr_add_status; 13898 uint32_t shdr_status, shdr_add_status;
12599 union lpfc_sli4_cfg_shdr *shdr; 13899 union lpfc_sli4_cfg_shdr *shdr;
12600 13900
13901 /* SLI4 ports that support extents do not require RPI headers. */
13902 if (!phba->sli4_hba.rpi_hdrs_in_use)
13903 return rc;
13904 if (phba->sli4_hba.extents_in_use)
13905 return -EIO;
13906
12601 /* The port is notified of the header region via a mailbox command. */ 13907 /* The port is notified of the header region via a mailbox command. */
12602 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13908 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12603 if (!mboxq) { 13909 if (!mboxq) {
@@ -12609,16 +13915,19 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12609 13915
12610 /* Post all rpi memory regions to the port. */ 13916 /* Post all rpi memory regions to the port. */
12611 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 13917 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
12612 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12613 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 13918 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
12614 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 13919 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
12615 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 13920 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
12616 sizeof(struct lpfc_sli4_cfg_mhdr), 13921 sizeof(struct lpfc_sli4_cfg_mhdr),
12617 LPFC_SLI4_MBX_EMBED); 13922 LPFC_SLI4_MBX_EMBED);
12618 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 13923
12619 hdr_tmpl, rpi_page->page_count); 13924
13925 /* Post the physical rpi to the port for this rpi header. */
12620 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 13926 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
12621 rpi_page->start_rpi); 13927 rpi_page->start_rpi);
13928 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
13929 hdr_tmpl, rpi_page->page_count);
13930
12622 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 13931 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
12623 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 13932 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
12624 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13933 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -12653,22 +13962,21 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12653int 13962int
12654lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 13963lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12655{ 13964{
12656 int rpi; 13965 unsigned long rpi;
12657 uint16_t max_rpi, rpi_base, rpi_limit; 13966 uint16_t max_rpi, rpi_limit;
12658 uint16_t rpi_remaining; 13967 uint16_t rpi_remaining, lrpi = 0;
12659 struct lpfc_rpi_hdr *rpi_hdr; 13968 struct lpfc_rpi_hdr *rpi_hdr;
12660 13969
12661 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 13970 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
12662 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
12663 rpi_limit = phba->sli4_hba.next_rpi; 13971 rpi_limit = phba->sli4_hba.next_rpi;
12664 13972
12665 /* 13973 /*
12666 * The valid rpi range is not guaranteed to be zero-based. Start 13974 * Fetch the next logical rpi. Because this index is logical,
12667 * the search at the rpi_base as reported by the port. 13975 * the driver starts at 0 each time.
12668 */ 13976 */
12669 spin_lock_irq(&phba->hbalock); 13977 spin_lock_irq(&phba->hbalock);
12670 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base); 13978 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
12671 if (rpi >= rpi_limit || rpi < rpi_base) 13979 if (rpi >= rpi_limit)
12672 rpi = LPFC_RPI_ALLOC_ERROR; 13980 rpi = LPFC_RPI_ALLOC_ERROR;
12673 else { 13981 else {
12674 set_bit(rpi, phba->sli4_hba.rpi_bmask); 13982 set_bit(rpi, phba->sli4_hba.rpi_bmask);
@@ -12678,7 +13986,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12678 13986
12679 /* 13987 /*
12680 * Don't try to allocate more rpi header regions if the device limit 13988 * Don't try to allocate more rpi header regions if the device limit
12681 * on available rpis max has been exhausted. 13989 * has been exhausted.
12682 */ 13990 */
12683 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 13991 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
12684 (phba->sli4_hba.rpi_count >= max_rpi)) { 13992 (phba->sli4_hba.rpi_count >= max_rpi)) {
@@ -12687,13 +13995,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12687 } 13995 }
12688 13996
12689 /* 13997 /*
13998 * RPI header postings are not required for SLI4 ports capable of
13999 * extents.
14000 */
14001 if (!phba->sli4_hba.rpi_hdrs_in_use) {
14002 spin_unlock_irq(&phba->hbalock);
14003 return rpi;
14004 }
14005
14006 /*
12690 * If the driver is running low on rpi resources, allocate another 14007 * If the driver is running low on rpi resources, allocate another
12691 * page now. Note that the next_rpi value is used because 14008 * page now. Note that the next_rpi value is used because
12692 * it represents how many are actually in use whereas max_rpi notes 14009 * it represents how many are actually in use whereas max_rpi notes
12693 * how many are supported max by the device. 14010 * how many are supported max by the device.
12694 */ 14011 */
12695 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base - 14012 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
12696 phba->sli4_hba.rpi_count;
12697 spin_unlock_irq(&phba->hbalock); 14013 spin_unlock_irq(&phba->hbalock);
12698 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 14014 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
12699 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 14015 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
@@ -12702,6 +14018,8 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12702 "2002 Error Could not grow rpi " 14018 "2002 Error Could not grow rpi "
12703 "count\n"); 14019 "count\n");
12704 } else { 14020 } else {
14021 lrpi = rpi_hdr->start_rpi;
14022 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
12705 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 14023 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
12706 } 14024 }
12707 } 14025 }
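The lpfc_sli4_alloc_rpi rework above makes allocation zero-based and logical, translating to a physical id through rpi_ids[] only when a header must be posted. A hedged, standalone sketch of that split; the occupancy array approximates find_next_zero_bit(), which the driver uses under phba->hbalock, and the map values are invented:

#include <stdint.h>
#include <stdio.h>

#define RPI_ALLOC_ERROR 0xffff   /* stand-in for LPFC_RPI_ALLOC_ERROR */

/* Grab the lowest free logical rpi from a simple occupancy array. */
static uint16_t alloc_logical_rpi(uint8_t *used, uint16_t limit)
{
	uint16_t i;

	for (i = 0; i < limit; i++) {
		if (!used[i]) {
			used[i] = 1;
			return i;        /* logical index, always 0-based */
		}
	}
	return RPI_ALLOC_ERROR;
}

int main(void)
{
	uint8_t used[8] = { 0 };
	/* Hypothetical logical->physical map, as rpi_ids[] would provide. */
	uint16_t rpi_ids[8] = { 0x40, 0x41, 0x42, 0x43,
				0x90, 0x91, 0x92, 0x93 };
	uint16_t lrpi = alloc_logical_rpi(used, 8);

	if (lrpi != RPI_ALLOC_ERROR)
		printf("logical rpi %u -> physical rpi %#x\n",
		       (unsigned)lrpi, (unsigned)rpi_ids[lrpi]);
	return 0;
}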
@@ -12751,6 +14069,8 @@ void
12751lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 14069lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
12752{ 14070{
12753 kfree(phba->sli4_hba.rpi_bmask); 14071 kfree(phba->sli4_hba.rpi_bmask);
14072 kfree(phba->sli4_hba.rpi_ids);
14073 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
12754} 14074}
12755 14075
12756/** 14076/**
@@ -13490,6 +14810,96 @@ out:
13490} 14810}
13491 14811
13492/** 14812/**
14813 * lpfc_wr_object - write an object to the firmware
14814 * @phba: HBA structure that indicates port to create a queue on.
14815 * @dmabuf_list: list of dmabufs to write to the port.
14816 * @size: the total byte value of the objects to write to the port.
14817 * @offset: the current offset to be used to start the transfer.
14818 *
14819 * This routine will create a wr_object mailbox command to send to the port.
14820 * The mailbox command will be constructed using the dma buffers described in
14821 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
14822 * BDEs as the embedded mailbox can support. The @offset variable will be
14823 * used to indicate the starting offset of the transfer and will also return
14824 * the offset after the write object mailbox has completed. @size is used to
14825 * determine the end of the object and whether the eof bit should be set.
14826 *
14827 * Return 0 if successful; @offset will contain the new offset to use
14828 * for the next write.
14829 * Return negative value for error cases.
14830 **/
14831int
14832lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
14833 uint32_t size, uint32_t *offset)
14834{
14835 struct lpfc_mbx_wr_object *wr_object;
14836 LPFC_MBOXQ_t *mbox;
14837 int rc = 0, i = 0;
14838 uint32_t shdr_status, shdr_add_status;
14839 uint32_t mbox_tmo;
14840 union lpfc_sli4_cfg_shdr *shdr;
14841 struct lpfc_dmabuf *dmabuf;
14842 uint32_t written = 0;
14843
14844 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14845 if (!mbox)
14846 return -ENOMEM;
14847
14848 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14849 LPFC_MBOX_OPCODE_WRITE_OBJECT,
14850 sizeof(struct lpfc_mbx_wr_object) -
14851 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
14852
14853 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
14854 wr_object->u.request.write_offset = *offset;
14855 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
14856 wr_object->u.request.object_name[0] =
14857 cpu_to_le32(wr_object->u.request.object_name[0]);
14858 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
14859 list_for_each_entry(dmabuf, dmabuf_list, list) {
14860 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
14861 break;
14862 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
14863 wr_object->u.request.bde[i].addrHigh =
14864 putPaddrHigh(dmabuf->phys);
14865 if (written + SLI4_PAGE_SIZE >= size) {
14866 wr_object->u.request.bde[i].tus.f.bdeSize =
14867 (size - written);
14868 written += (size - written);
14869 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
14870 } else {
14871 wr_object->u.request.bde[i].tus.f.bdeSize =
14872 SLI4_PAGE_SIZE;
14873 written += SLI4_PAGE_SIZE;
14874 }
14875 i++;
14876 }
14877 wr_object->u.request.bde_count = i;
14878 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
14879 if (!phba->sli4_hba.intr_enable)
14880 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14881 else {
14882 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
14883 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14884 }
14885 /* The IOCTL status is embedded in the mailbox subheader. */
14886 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
14887 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14888 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14889 if (rc != MBX_TIMEOUT)
14890 mempool_free(mbox, phba->mbox_mem_pool);
14891 if (shdr_status || shdr_add_status || rc) {
14892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14893 "3025 Write Object mailbox failed with "
14894 "status x%x add_status x%x, mbx status x%x\n",
14895 shdr_status, shdr_add_status, rc);
14896 rc = -ENXIO;
14897 } else
14898 *offset += wr_object->u.response.actual_write_length;
14899 return rc;
14900}
14901
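The BDE loop in lpfc_wr_object above is essentially "split the object into page-sized descriptors, mark the last one eof, and advance the caller's offset". The sketch below reproduces just that slicing with hypothetical limits so the offset/eof bookkeeping is easier to follow than in the mailbox code; it is not the driver routine itself.

#include <stdio.h>

#define PAGE_SZ  4096u   /* stand-in for SLI4_PAGE_SIZE */
#define MAX_BDE    20u   /* stand-in for LPFC_MBX_WR_CONFIG_MAX_BDE */

/*
 * Describe up to MAX_BDE descriptors for the 'remaining' bytes of an
 * object. Returns how many bytes this round covered; *eof is set once
 * the final, possibly short, descriptor has been emitted.
 */
static unsigned int chunk_object(unsigned int remaining, int *eof)
{
	unsigned int written = 0, bde;

	*eof = 0;
	for (bde = 0; bde < MAX_BDE && written < remaining; bde++) {
		if (written + PAGE_SZ >= remaining) {
			written += remaining - written;   /* short final BDE */
			*eof = 1;
		} else {
			written += PAGE_SZ;               /* full-page BDE */
		}
	}
	return written;
}

int main(void)
{
	unsigned int offset = 0, size = 100000;   /* needs two rounds */
	int eof = 0;

	while (!eof && offset < size) {
		unsigned int w = chunk_object(size - offset, &eof);

		offset += w;
		printf("wrote %u, offset now %u, eof %d\n", w, offset, eof);
	}
	return 0;
}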
14902/**
13493 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 14903 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
13494 * @vport: pointer to vport data structure. 14904 * @vport: pointer to vport data structure.
13495 * 14905 *
@@ -13644,7 +15054,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
13644 * never happen 15054 * never happen
13645 */ 15055 */
13646 sglq = __lpfc_clear_active_sglq(phba, 15056 sglq = __lpfc_clear_active_sglq(phba,
13647 sglq->sli4_xritag); 15057 sglq->sli4_lxritag);
13648 spin_unlock_irqrestore(&phba->hbalock, iflags); 15058 spin_unlock_irqrestore(&phba->hbalock, iflags);
13649 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15059 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13650 "2823 txq empty and txq_cnt is %d\n ", 15060 "2823 txq empty and txq_cnt is %d\n ",
@@ -13656,6 +15066,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
13656 /* The xri and iocb resources secured, 15066 /* The xri and iocb resources secured,
13657 * attempt to issue request 15067 * attempt to issue request
13658 */ 15068 */
15069 piocbq->sli4_lxritag = sglq->sli4_lxritag;
13659 piocbq->sli4_xritag = sglq->sli4_xritag; 15070 piocbq->sli4_xritag = sglq->sli4_xritag;
13660 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 15071 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
13661 fail_msg = "to convert bpl to sgl"; 15072 fail_msg = "to convert bpl to sgl";
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 453577c21c14..a0075b0af142 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -52,6 +52,7 @@ struct lpfc_iocbq {
52 struct list_head clist; 52 struct list_head clist;
53 struct list_head dlist; 53 struct list_head dlist;
54 uint16_t iotag; /* pre-assigned IO tag */ 54 uint16_t iotag; /* pre-assigned IO tag */
55 uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
55 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 56 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
56 struct lpfc_cq_event cq_event; 57 struct lpfc_cq_event cq_event;
57 58
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1a3cbf88f2ce..4b1703554a26 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -310,7 +310,6 @@ struct lpfc_max_cfg_param {
310 uint16_t vfi_base; 310 uint16_t vfi_base;
311 uint16_t vfi_used; 311 uint16_t vfi_used;
312 uint16_t max_fcfi; 312 uint16_t max_fcfi;
313 uint16_t fcfi_base;
314 uint16_t fcfi_used; 313 uint16_t fcfi_used;
315 uint16_t max_eq; 314 uint16_t max_eq;
316 uint16_t max_rq; 315 uint16_t max_rq;
@@ -365,6 +364,11 @@ struct lpfc_pc_sli4_params {
365 uint8_t rqv; 364 uint8_t rqv;
366}; 365};
367 366
367struct lpfc_iov {
368 uint32_t pf_number;
369 uint32_t vf_number;
370};
371
368/* SLI4 HBA data structure entries */ 372/* SLI4 HBA data structure entries */
369struct lpfc_sli4_hba { 373struct lpfc_sli4_hba {
370 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 374 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -444,10 +448,13 @@ struct lpfc_sli4_hba {
444 uint32_t intr_enable; 448 uint32_t intr_enable;
445 struct lpfc_bmbx bmbx; 449 struct lpfc_bmbx bmbx;
446 struct lpfc_max_cfg_param max_cfg_param; 450 struct lpfc_max_cfg_param max_cfg_param;
451 uint16_t extents_in_use; /* must allocate resource extents. */
452 uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
447 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ 453 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
448 uint16_t next_rpi; 454 uint16_t next_rpi;
449 uint16_t scsi_xri_max; 455 uint16_t scsi_xri_max;
450 uint16_t scsi_xri_cnt; 456 uint16_t scsi_xri_cnt;
457 uint16_t scsi_xri_start;
451 struct list_head lpfc_free_sgl_list; 458 struct list_head lpfc_free_sgl_list;
452 struct list_head lpfc_sgl_list; 459 struct list_head lpfc_sgl_list;
453 struct lpfc_sglq **lpfc_els_sgl_array; 460 struct lpfc_sglq **lpfc_els_sgl_array;
@@ -458,7 +465,17 @@ struct lpfc_sli4_hba {
458 struct lpfc_sglq **lpfc_sglq_active_list; 465 struct lpfc_sglq **lpfc_sglq_active_list;
459 struct list_head lpfc_rpi_hdr_list; 466 struct list_head lpfc_rpi_hdr_list;
460 unsigned long *rpi_bmask; 467 unsigned long *rpi_bmask;
468 uint16_t *rpi_ids;
461 uint16_t rpi_count; 469 uint16_t rpi_count;
470 struct list_head lpfc_rpi_blk_list;
471 unsigned long *xri_bmask;
472 uint16_t *xri_ids;
473 uint16_t xri_count;
474 struct list_head lpfc_xri_blk_list;
475 unsigned long *vfi_bmask;
476 uint16_t *vfi_ids;
477 uint16_t vfi_count;
478 struct list_head lpfc_vfi_blk_list;
462 struct lpfc_sli4_flags sli4_flags; 479 struct lpfc_sli4_flags sli4_flags;
463 struct list_head sp_queue_event; 480 struct list_head sp_queue_event;
464 struct list_head sp_cqe_event_pool; 481 struct list_head sp_cqe_event_pool;
@@ -467,6 +484,7 @@ struct lpfc_sli4_hba {
467 struct list_head sp_els_xri_aborted_work_queue; 484 struct list_head sp_els_xri_aborted_work_queue;
468 struct list_head sp_unsol_work_queue; 485 struct list_head sp_unsol_work_queue;
469 struct lpfc_sli4_link link_state; 486 struct lpfc_sli4_link link_state;
487 struct lpfc_iov iov;
470 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 488 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
471 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ 489 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
472}; 490};
@@ -490,6 +508,7 @@ struct lpfc_sglq {
490 enum lpfc_sgl_state state; 508 enum lpfc_sgl_state state;
491 struct lpfc_nodelist *ndlp; /* ndlp associated with IO */ 509 struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
492 uint16_t iotag; /* pre-assigned IO tag */ 510 uint16_t iotag; /* pre-assigned IO tag */
511 uint16_t sli4_lxritag; /* logical pre-assigned xri. */
493 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 512 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
494 struct sli4_sge *sgl; /* pre-assigned SGL */ 513 struct sli4_sge *sgl; /* pre-assigned SGL */
495 void *virt; /* virtual address. */ 514 void *virt; /* virtual address. */
@@ -504,6 +523,13 @@ struct lpfc_rpi_hdr {
504 uint32_t start_rpi; 523 uint32_t start_rpi;
505}; 524};
506 525
526struct lpfc_rsrc_blks {
527 struct list_head list;
528 uint16_t rsrc_start;
529 uint16_t rsrc_size;
530 uint16_t rsrc_used;
531};
532
507/* 533/*
508 * SLI4 specific function prototypes 534 * SLI4 specific function prototypes
509 */ 535 */
@@ -543,8 +569,11 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
543int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); 569int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
544uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); 570uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
545int lpfc_sli4_post_async_mbox(struct lpfc_hba *); 571int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
546int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); 572int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
573int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
547int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); 574int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
575int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
576 int);
548struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 577struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
549struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 578struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
550void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); 579void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 30ba5440c67a..1feb551a57bc 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -83,7 +83,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
83static int 83static int
84lpfc_alloc_vpi(struct lpfc_hba *phba) 84lpfc_alloc_vpi(struct lpfc_hba *phba)
85{ 85{
86 int vpi; 86 unsigned long vpi;
87 87
88 spin_lock_irq(&phba->hbalock); 88 spin_lock_irq(&phba->hbalock);
89 /* Start at bit 1 because vpi zero is reserved for the physical port */ 89 /* Start at bit 1 because vpi zero is reserved for the physical port */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 046dcc672ec1..7370c084b178 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.05.34-rc1" 36#define MEGASAS_VERSION "00.00.05.38-rc1"
37#define MEGASAS_RELDATE "Feb. 24, 2011" 37#define MEGASAS_RELDATE "May. 11, 2011"
38#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011" 38#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -76,8 +76,8 @@
76#define MFI_STATE_READY 0xB0000000 76#define MFI_STATE_READY 0xB0000000
77#define MFI_STATE_OPERATIONAL 0xC0000000 77#define MFI_STATE_OPERATIONAL 0xC0000000
78#define MFI_STATE_FAULT 0xF0000000 78#define MFI_STATE_FAULT 0xF0000000
79#define MFI_RESET_REQUIRED 0x00000001 79#define MFI_RESET_REQUIRED 0x00000001
80 80#define MFI_RESET_ADAPTER 0x00000002
81#define MEGAMFI_FRAME_SIZE 64 81#define MEGAMFI_FRAME_SIZE 64
82 82
83/* 83/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 89c623ebadbc..2d8cdce7b2f5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.05.34-rc1 21 * Version : v00.00.05.38-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -437,15 +437,18 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
437static int 437static int
438megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) 438megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
439{ 439{
440 u32 status; 440 u32 status, mfiStatus = 0;
441
441 /* 442 /*
442 * Check if it is our interrupt 443 * Check if it is our interrupt
443 */ 444 */
444 status = readl(&regs->outbound_intr_status); 445 status = readl(&regs->outbound_intr_status);
445 446
446 if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) { 447 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
447 return 0; 448 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
448 } 449
450 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
451 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
449 452
450 /* 453 /*
451 * Clear the interrupt by writing back the same value 454 * Clear the interrupt by writing back the same value
@@ -455,8 +458,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
455 /* Dummy readl to force pci flush */ 458 /* Dummy readl to force pci flush */
456 readl(&regs->outbound_doorbell_clear); 459 readl(&regs->outbound_doorbell_clear);
457 460
458 return 1; 461 return mfiStatus;
459} 462}
463
460/** 464/**
461 * megasas_fire_cmd_ppc - Sends command to the FW 465 * megasas_fire_cmd_ppc - Sends command to the FW
462 * @frame_phys_addr : Physical address of cmd 466 * @frame_phys_addr : Physical address of cmd
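The megasas_clear_intr_ppc change above stops returning a bare 0/1 and instead folds the raw outbound interrupt status into a small flag word the caller can inspect. A toy decode with the same shape; the bit values here are placeholders, not the real MFI definitions from megaraid_sas.h:

#include <stdint.h>
#include <stdio.h>

/* Placeholder hardware bits and driver flags; illustrative only. */
#define HW_REPLY_MSG_BIT        (1u << 2)
#define HW_DOORBELL_CHANGE_BIT  (1u << 5)

#define FLAG_REPLY_MESSAGE          0x1u
#define FLAG_FIRMWARE_STATE_CHANGE  0x2u

/* Fold raw status into driver-level flags; 0 means "not our interrupt". */
static uint32_t decode_intr_status(uint32_t status)
{
	uint32_t flags = 0;

	if (status & HW_REPLY_MSG_BIT)
		flags |= FLAG_REPLY_MESSAGE;
	if (status & HW_DOORBELL_CHANGE_BIT)
		flags |= FLAG_FIRMWARE_STATE_CHANGE;
	return flags;
}

int main(void)
{
	printf("%#x\n", decode_intr_status(HW_REPLY_MSG_BIT));
	printf("%#x\n",
	       decode_intr_status(HW_REPLY_MSG_BIT | HW_DOORBELL_CHANGE_BIT));
	printf("%#x\n", decode_intr_status(0));   /* not our interrupt */
	return 0;
}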
@@ -477,17 +481,6 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
477} 481}
478 482
479/** 483/**
480 * megasas_adp_reset_ppc - For controller reset
481 * @regs: MFI register set
482 */
483static int
484megasas_adp_reset_ppc(struct megasas_instance *instance,
485 struct megasas_register_set __iomem *regs)
486{
487 return 0;
488}
489
490/**
491 * megasas_check_reset_ppc - For controller reset check 484 * megasas_check_reset_ppc - For controller reset check
492 * @regs: MFI register set 485 * @regs: MFI register set
493 */ 486 */
@@ -495,8 +488,12 @@ static int
495megasas_check_reset_ppc(struct megasas_instance *instance, 488megasas_check_reset_ppc(struct megasas_instance *instance,
496 struct megasas_register_set __iomem *regs) 489 struct megasas_register_set __iomem *regs)
497{ 490{
491 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
492 return 1;
493
498 return 0; 494 return 0;
499} 495}
496
500static struct megasas_instance_template megasas_instance_template_ppc = { 497static struct megasas_instance_template megasas_instance_template_ppc = {
501 498
502 .fire_cmd = megasas_fire_cmd_ppc, 499 .fire_cmd = megasas_fire_cmd_ppc,
@@ -504,7 +501,7 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
504 .disable_intr = megasas_disable_intr_ppc, 501 .disable_intr = megasas_disable_intr_ppc,
505 .clear_intr = megasas_clear_intr_ppc, 502 .clear_intr = megasas_clear_intr_ppc,
506 .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 503 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
507 .adp_reset = megasas_adp_reset_ppc, 504 .adp_reset = megasas_adp_reset_xscale,
508 .check_reset = megasas_check_reset_ppc, 505 .check_reset = megasas_check_reset_ppc,
509 .service_isr = megasas_isr, 506 .service_isr = megasas_isr,
510 .tasklet = megasas_complete_cmd_dpc, 507 .tasklet = megasas_complete_cmd_dpc,
@@ -620,6 +617,9 @@ static int
620megasas_check_reset_skinny(struct megasas_instance *instance, 617megasas_check_reset_skinny(struct megasas_instance *instance,
621 struct megasas_register_set __iomem *regs) 618 struct megasas_register_set __iomem *regs)
622{ 619{
620 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
621 return 1;
622
623 return 0; 623 return 0;
624} 624}
625 625
@@ -3454,7 +3454,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
3454{ 3454{
3455 u32 max_sectors_1; 3455 u32 max_sectors_1;
3456 u32 max_sectors_2; 3456 u32 max_sectors_2;
3457 u32 tmp_sectors; 3457 u32 tmp_sectors, msix_enable;
3458 struct megasas_register_set __iomem *reg_set; 3458 struct megasas_register_set __iomem *reg_set;
3459 struct megasas_ctrl_info *ctrl_info; 3459 struct megasas_ctrl_info *ctrl_info;
3460 unsigned long bar_list; 3460 unsigned long bar_list;
@@ -3507,6 +3507,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
3507 if (megasas_transition_to_ready(instance)) 3507 if (megasas_transition_to_ready(instance))
3508 goto fail_ready_state; 3508 goto fail_ready_state;
3509 3509
3510 /* Check if MSI-X is supported while in ready state */
3511 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
3512 0x4000000) >> 0x1a;
3513 if (msix_enable && !msix_disable &&
3514 !pci_enable_msix(instance->pdev, &instance->msixentry, 1))
3515 instance->msi_flag = 1;
3516
3510 /* Get operational params, sge flags, send init cmd to controller */ 3517 /* Get operational params, sge flags, send init cmd to controller */
3511 if (instance->instancet->init_adapter(instance)) 3518 if (instance->instancet->init_adapter(instance))
3512 goto fail_init_adapter; 3519 goto fail_init_adapter;
@@ -4076,14 +4083,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4076 else 4083 else
4077 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 4084 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
4078 4085
4079 /* Try to enable MSI-X */
4080 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
4081 (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
4082 (instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) &&
4083 !msix_disable && !pci_enable_msix(instance->pdev,
4084 &instance->msixentry, 1))
4085 instance->msi_flag = 1;
4086
4087 /* 4086 /*
4088 * Initialize MFI Firmware 4087 * Initialize MFI Firmware
4089 */ 4088 */
@@ -4116,6 +4115,14 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4116 megasas_mgmt_info.max_index++; 4115 megasas_mgmt_info.max_index++;
4117 4116
4118 /* 4117 /*
4118 * Register with SCSI mid-layer
4119 */
4120 if (megasas_io_attach(instance))
4121 goto fail_io_attach;
4122
4123 instance->unload = 0;
4124
4125 /*
4119 * Initiate AEN (Asynchronous Event Notification) 4126 * Initiate AEN (Asynchronous Event Notification)
4120 */ 4127 */
4121 if (megasas_start_aen(instance)) { 4128 if (megasas_start_aen(instance)) {
@@ -4123,13 +4130,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4123 goto fail_start_aen; 4130 goto fail_start_aen;
4124 } 4131 }
4125 4132
4126 /*
4127 * Register with SCSI mid-layer
4128 */
4129 if (megasas_io_attach(instance))
4130 goto fail_io_attach;
4131
4132 instance->unload = 0;
4133 return 0; 4133 return 0;
4134 4134
4135 fail_start_aen: 4135 fail_start_aen:
@@ -4332,10 +4332,6 @@ megasas_resume(struct pci_dev *pdev)
4332 if (megasas_set_dma_mask(pdev)) 4332 if (megasas_set_dma_mask(pdev))
4333 goto fail_set_dma_mask; 4333 goto fail_set_dma_mask;
4334 4334
4335 /* Now re-enable MSI-X */
4336 if (instance->msi_flag)
4337 pci_enable_msix(instance->pdev, &instance->msixentry, 1);
4338
4339 /* 4335 /*
4340 * Initialize MFI Firmware 4336 * Initialize MFI Firmware
4341 */ 4337 */
@@ -4348,6 +4344,10 @@ megasas_resume(struct pci_dev *pdev)
4348 if (megasas_transition_to_ready(instance)) 4344 if (megasas_transition_to_ready(instance))
4349 goto fail_ready_state; 4345 goto fail_ready_state;
4350 4346
4347 /* Now re-enable MSI-X */
4348 if (instance->msi_flag)
4349 pci_enable_msix(instance->pdev, &instance->msixentry, 1);
4350
4351 switch (instance->pdev->device) { 4351 switch (instance->pdev->device) {
4352 case PCI_DEVICE_ID_LSI_FUSION: 4352 case PCI_DEVICE_ID_LSI_FUSION:
4353 { 4353 {
@@ -4384,12 +4384,6 @@ megasas_resume(struct pci_dev *pdev)
4384 4384
4385 instance->instancet->enable_intr(instance->reg_set); 4385 instance->instancet->enable_intr(instance->reg_set);
4386 4386
4387 /*
4388 * Initiate AEN (Asynchronous Event Notification)
4389 */
4390 if (megasas_start_aen(instance))
4391 printk(KERN_ERR "megasas: Start AEN failed\n");
4392
4393 /* Initialize the cmd completion timer */ 4387 /* Initialize the cmd completion timer */
4394 if (poll_mode_io) 4388 if (poll_mode_io)
4395 megasas_start_timer(instance, &instance->io_completion_timer, 4389 megasas_start_timer(instance, &instance->io_completion_timer,
@@ -4397,6 +4391,12 @@ megasas_resume(struct pci_dev *pdev)
4397 MEGASAS_COMPLETION_TIMER_INTERVAL); 4391 MEGASAS_COMPLETION_TIMER_INTERVAL);
4398 instance->unload = 0; 4392 instance->unload = 0;
4399 4393
4394 /*
4395 * Initiate AEN (Asynchronous Event Notification)
4396 */
4397 if (megasas_start_aen(instance))
4398 printk(KERN_ERR "megasas: Start AEN failed\n");
4399
4400 return 0; 4400 return 0;
4401 4401
4402fail_irq: 4402fail_irq:
@@ -4527,6 +4527,11 @@ static void megasas_shutdown(struct pci_dev *pdev)
4527 instance->unload = 1; 4527 instance->unload = 1;
4528 megasas_flush_cache(instance); 4528 megasas_flush_cache(instance);
4529 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 4529 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
4530 instance->instancet->disable_intr(instance->reg_set);
4531 free_irq(instance->msi_flag ? instance->msixentry.vector :
4532 instance->pdev->irq, instance);
4533 if (instance->msi_flag)
4534 pci_disable_msix(instance->pdev);
4530} 4535}
4531 4536
4532/** 4537/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 145a8cffb1fa..f13e7abd345a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -696,22 +696,6 @@ fail_get_cmd:
696} 696}
697 697
698/* 698/*
699 * megasas_return_cmd_for_smid - Returns a cmd_fusion for a SMID
700 * @instance: Adapter soft state
701 *
702 */
703void
704megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid)
705{
706 struct fusion_context *fusion;
707 struct megasas_cmd_fusion *cmd;
708
709 fusion = instance->ctrl_context;
710 cmd = fusion->cmd_list[smid - 1];
711 megasas_return_cmd_fusion(instance, cmd);
712}
713
714/*
715 * megasas_get_ld_map_info - Returns FW's ld_map structure 699 * megasas_get_ld_map_info - Returns FW's ld_map structure
716 * @instance: Adapter soft state 700 * @instance: Adapter soft state
717 * @pend: Pend the command or not 701 * @pend: Pend the command or not
@@ -1153,7 +1137,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1153 u64 start_blk = io_info->pdBlock; 1137 u64 start_blk = io_info->pdBlock;
1154 u8 *cdb = io_request->CDB.CDB32; 1138 u8 *cdb = io_request->CDB.CDB32;
1155 u32 num_blocks = io_info->numBlocks; 1139 u32 num_blocks = io_info->numBlocks;
1156 u8 opcode, flagvals, groupnum, control; 1140 u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
1157 1141
1158 /* Check if T10 PI (DIF) is enabled for this LD */ 1142 /* Check if T10 PI (DIF) is enabled for this LD */
1159 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); 1143 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
@@ -1235,7 +1219,46 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1235 cdb[8] = (u8)(num_blocks & 0xff); 1219 cdb[8] = (u8)(num_blocks & 0xff);
1236 cdb[7] = (u8)((num_blocks >> 8) & 0xff); 1220 cdb[7] = (u8)((num_blocks >> 8) & 0xff);
1237 1221
1222 io_request->IoFlags = 10; /* Specify 10-byte cdb */
1238 cdb_len = 10; 1223 cdb_len = 10;
1224 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
1225 /* Convert to 16 byte CDB for large LBA's */
1226 switch (cdb_len) {
1227 case 6:
1228 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
1229 control = cdb[5];
1230 break;
1231 case 10:
1232 opcode =
1233 cdb[0] == READ_10 ? READ_16 : WRITE_16;
1234 flagvals = cdb[1];
1235 groupnum = cdb[6];
1236 control = cdb[9];
1237 break;
1238 case 12:
1239 opcode =
1240 cdb[0] == READ_12 ? READ_16 : WRITE_16;
1241 flagvals = cdb[1];
1242 groupnum = cdb[10];
1243 control = cdb[11];
1244 break;
1245 }
1246
1247 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1248
1249 cdb[0] = opcode;
1250 cdb[1] = flagvals;
1251 cdb[14] = groupnum;
1252 cdb[15] = control;
1253
1254 /* Transfer length */
1255 cdb[13] = (u8)(num_blocks & 0xff);
1256 cdb[12] = (u8)((num_blocks >> 8) & 0xff);
1257 cdb[11] = (u8)((num_blocks >> 16) & 0xff);
1258 cdb[10] = (u8)((num_blocks >> 24) & 0xff);
1259
1260 io_request->IoFlags = 16; /* Specify 16-byte cdb */
1261 cdb_len = 16;
1239 } 1262 }
1240 1263
1241 /* Normal case, just load LBA here */ 1264 /* Normal case, just load LBA here */
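The new branch above rewrites 6/10/12-byte CDBs as 16-byte ones once the start block no longer fits in 32 bits. A hedged, standalone sketch of the standard READ(16) byte layout the converted CDB targets (opcode in byte 0, 8-byte big-endian LBA in bytes 2-9, 4-byte transfer length in bytes 10-13, group and control in 14-15); the LBA itself is filled in by the common path, so this is an illustration, not the driver code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define READ_16 0x88   /* SCSI READ(16) opcode */

/* Build a READ(16) CDB for 'num_blocks' blocks starting at 'lba'. */
static void build_read16(uint8_t cdb[16], uint64_t lba, uint32_t num_blocks)
{
	int i;

	memset(cdb, 0, 16);
	cdb[0] = READ_16;
	for (i = 0; i < 8; i++)                  /* bytes 2..9: big-endian LBA */
		cdb[2 + i] = (uint8_t)(lba >> (8 * (7 - i)));
	cdb[10] = (uint8_t)(num_blocks >> 24);   /* bytes 10..13: length */
	cdb[11] = (uint8_t)(num_blocks >> 16);
	cdb[12] = (uint8_t)(num_blocks >> 8);
	cdb[13] = (uint8_t)num_blocks;
}

int main(void)
{
	uint8_t cdb[16];
	int i;

	build_read16(cdb, 0x100000000ULL, 8);   /* LBA above the 32-bit line */
	for (i = 0; i < 16; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}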
@@ -2026,17 +2049,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2026 struct fusion_context *fusion; 2049 struct fusion_context *fusion;
2027 struct megasas_cmd *cmd_mfi; 2050 struct megasas_cmd *cmd_mfi;
2028 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2051 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2029 u32 host_diag, abs_state; 2052 u32 host_diag, abs_state, status_reg, reset_adapter;
2030 2053
2031 instance = (struct megasas_instance *)shost->hostdata; 2054 instance = (struct megasas_instance *)shost->hostdata;
2032 fusion = instance->ctrl_context; 2055 fusion = instance->ctrl_context;
2033 2056
2034 mutex_lock(&instance->reset_mutex);
2035 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2036 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2037 instance->instancet->disable_intr(instance->reg_set);
2038 msleep(1000);
2039
2040 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 2057 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
2041 printk(KERN_WARNING "megaraid_sas: Hardware critical error, " 2058 printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
2042 "returning FAILED.\n"); 2059 "returning FAILED.\n");
@@ -2044,6 +2061,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2044 goto out; 2061 goto out;
2045 } 2062 }
2046 2063
2064 mutex_lock(&instance->reset_mutex);
2065 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2066 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2067 instance->instancet->disable_intr(instance->reg_set);
2068 msleep(1000);
2069
2047 /* First try waiting for commands to complete */ 2070 /* First try waiting for commands to complete */
2048 if (megasas_wait_for_outstanding_fusion(instance)) { 2071 if (megasas_wait_for_outstanding_fusion(instance)) {
2049 printk(KERN_WARNING "megaraid_sas: resetting fusion " 2072 printk(KERN_WARNING "megaraid_sas: resetting fusion "
@@ -2060,7 +2083,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2060 } 2083 }
2061 } 2084 }
2062 2085
2063 if (instance->disableOnlineCtrlReset == 1) { 2086 status_reg = instance->instancet->read_fw_status_reg(
2087 instance->reg_set);
2088 abs_state = status_reg & MFI_STATE_MASK;
2089 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2090 if (instance->disableOnlineCtrlReset ||
2091 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2064 /* Reset not supported, kill adapter */ 2092 /* Reset not supported, kill adapter */
2065 printk(KERN_WARNING "megaraid_sas: Reset not supported" 2093 printk(KERN_WARNING "megaraid_sas: Reset not supported"
2066 ", killing adapter.\n"); 2094 ", killing adapter.\n");
@@ -2089,6 +2117,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2089 2117
2090 /* Check that the diag write enable (DRWE) bit is on */ 2118 /* Check that the diag write enable (DRWE) bit is on */
2091 host_diag = readl(&instance->reg_set->fusion_host_diag); 2119 host_diag = readl(&instance->reg_set->fusion_host_diag);
2120 retry = 0;
2092 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 2121 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2093 msleep(100); 2122 msleep(100);
2094 host_diag = 2123 host_diag =
@@ -2126,7 +2155,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2126 2155
2127 abs_state = 2156 abs_state =
2128 instance->instancet->read_fw_status_reg( 2157 instance->instancet->read_fw_status_reg(
2129 instance->reg_set); 2158 instance->reg_set) & MFI_STATE_MASK;
2130 retry = 0; 2159 retry = 0;
2131 2160
2132 while ((abs_state <= MFI_STATE_FW_INIT) && 2161 while ((abs_state <= MFI_STATE_FW_INIT) &&
@@ -2134,7 +2163,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2134 msleep(100); 2163 msleep(100);
2135 abs_state = 2164 abs_state =
2136 instance->instancet->read_fw_status_reg( 2165 instance->instancet->read_fw_status_reg(
2137 instance->reg_set); 2166 instance->reg_set) & MFI_STATE_MASK;
2138 } 2167 }
2139 if (abs_state <= MFI_STATE_FW_INIT) { 2168 if (abs_state <= MFI_STATE_FW_INIT) {
2140 printk(KERN_WARNING "megaraid_sas: firmware " 2169 printk(KERN_WARNING "megaraid_sas: firmware "
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 2a3c05f6db8b..dcc289c25459 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "08.100.00.01" 72#define MPT2SAS_DRIVER_VERSION "08.100.00.02"
73#define MPT2SAS_MAJOR_VERSION 08 73#define MPT2SAS_MAJOR_VERSION 08
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 01 76#define MPT2SAS_RELEASE_VERSION 02
77 77
78/* 78/*
79 * Set MPT2SAS_SG_DEPTH value based on user input. 79 * Set MPT2SAS_SG_DEPTH value based on user input.
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index f12e02358d6d..a7dbc6825f5f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -113,6 +113,7 @@ struct sense_info {
113}; 113};
114 114
115 115
116#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
116#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF) 117#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
117 118
118/** 119/**
@@ -121,6 +122,7 @@ struct sense_info {
121 * @work: work object (ioc->fault_reset_work_q) 122 * @work: work object (ioc->fault_reset_work_q)
122 * @cancel_pending_work: flag set during reset handling 123 * @cancel_pending_work: flag set during reset handling
123 * @ioc: per adapter object 124 * @ioc: per adapter object
125 * @device_handle: device handle
124 * @VF_ID: virtual function id 126 * @VF_ID: virtual function id
125 * @VP_ID: virtual port id 127 * @VP_ID: virtual port id
126 * @ignore: flag meaning this event has been marked to ignore 128 * @ignore: flag meaning this event has been marked to ignore
@@ -134,6 +136,7 @@ struct fw_event_work {
134 u8 cancel_pending_work; 136 u8 cancel_pending_work;
135 struct delayed_work delayed_work; 137 struct delayed_work delayed_work;
136 struct MPT2SAS_ADAPTER *ioc; 138 struct MPT2SAS_ADAPTER *ioc;
139 u16 device_handle;
137 u8 VF_ID; 140 u8 VF_ID;
138 u8 VP_ID; 141 u8 VP_ID;
139 u8 ignore; 142 u8 ignore;
@@ -3499,6 +3502,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
3499 3502
3500 switch (prot_type) { 3503 switch (prot_type) {
3501 case SCSI_PROT_DIF_TYPE1: 3504 case SCSI_PROT_DIF_TYPE1:
3505 case SCSI_PROT_DIF_TYPE2:
3502 3506
3503 /* 3507 /*
3504 * enable ref/guard checking 3508 * enable ref/guard checking
@@ -3511,13 +3515,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
3511 cpu_to_be32(scsi_get_lba(scmd)); 3515 cpu_to_be32(scsi_get_lba(scmd));
3512 break; 3516 break;
3513 3517
3514 case SCSI_PROT_DIF_TYPE2:
3515
3516 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3517 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3518 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3519 break;
3520
3521 case SCSI_PROT_DIF_TYPE3: 3518 case SCSI_PROT_DIF_TYPE3:
3522 3519
3523 /* 3520 /*
@@ -4047,17 +4044,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4047#endif 4044#endif
4048 4045
4049/** 4046/**
4050 * _scsih_smart_predicted_fault - illuminate Fault LED 4047 * _scsih_turn_on_fault_led - illuminate Fault LED
4051 * @ioc: per adapter object 4048 * @ioc: per adapter object
4052 * @handle: device handle 4049 * @handle: device handle
4050 * Context: process
4053 * 4051 *
4054 * Return nothing. 4052 * Return nothing.
4055 */ 4053 */
4056static void 4054static void
4057_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) 4055_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4058{ 4056{
4059 Mpi2SepReply_t mpi_reply; 4057 Mpi2SepReply_t mpi_reply;
4060 Mpi2SepRequest_t mpi_request; 4058 Mpi2SepRequest_t mpi_request;
4059
4060 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
4061 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
4062 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
4063 mpi_request.SlotStatus =
4064 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
4065 mpi_request.DevHandle = cpu_to_le16(handle);
4066 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
4067 if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
4068 &mpi_request)) != 0) {
4069 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
4070 __FILE__, __LINE__, __func__);
4071 return;
4072 }
4073
4074 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
4075 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
4076 "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
4077 le16_to_cpu(mpi_reply.IOCStatus),
4078 le32_to_cpu(mpi_reply.IOCLogInfo)));
4079 return;
4080 }
4081}
4082
4083/**
4084 * _scsih_send_event_to_turn_on_fault_led - fire delayed event
4085 * @ioc: per adapter object
4086 * @handle: device handle
4087 * Context: interrupt.
4088 *
4089 * Return nothing.
4090 */
4091static void
4092_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4093{
4094 struct fw_event_work *fw_event;
4095
4096 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
4097 if (!fw_event)
4098 return;
4099 fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
4100 fw_event->device_handle = handle;
4101 fw_event->ioc = ioc;
4102 _scsih_fw_event_add(ioc, fw_event);
4103}
4104
4105/**
4106 * _scsih_smart_predicted_fault - process smart errors
4107 * @ioc: per adapter object
4108 * @handle: device handle
4109 * Context: interrupt.
4110 *
4111 * Return nothing.
4112 */
4113static void
4114_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4115{
4061 struct scsi_target *starget; 4116 struct scsi_target *starget;
4062 struct MPT2SAS_TARGET *sas_target_priv_data; 4117 struct MPT2SAS_TARGET *sas_target_priv_data;
4063 Mpi2EventNotificationReply_t *event_reply; 4118 Mpi2EventNotificationReply_t *event_reply;
@@ -4084,30 +4139,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4084 starget_printk(KERN_WARNING, starget, "predicted fault\n"); 4139 starget_printk(KERN_WARNING, starget, "predicted fault\n");
4085 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4140 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4086 4141
4087 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) { 4142 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
4088 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); 4143 _scsih_send_event_to_turn_on_fault_led(ioc, handle);
4089 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
4090 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
4091 mpi_request.SlotStatus =
4092 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
4093 mpi_request.DevHandle = cpu_to_le16(handle);
4094 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
4095 if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
4096 &mpi_request)) != 0) {
4097 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4098 ioc->name, __FILE__, __LINE__, __func__);
4099 return;
4100 }
4101
4102 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
4103 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
4104 "enclosure_processor: ioc_status (0x%04x), "
4105 "loginfo(0x%08x)\n", ioc->name,
4106 le16_to_cpu(mpi_reply.IOCStatus),
4107 le32_to_cpu(mpi_reply.IOCLogInfo)));
4108 return;
4109 }
4110 }
4111 4144
4112 /* insert into event log */ 4145 /* insert into event log */
4113 sz = offsetof(Mpi2EventNotificationReply_t, EventData) + 4146 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -6753,6 +6786,9 @@ _firmware_event_work(struct work_struct *work)
6753 } 6786 }
6754 6787
6755 switch (fw_event->event) { 6788 switch (fw_event->event) {
6789 case MPT2SAS_TURN_ON_FAULT_LED:
6790 _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
6791 break;
6756 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 6792 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6757 _scsih_sas_topology_change_event(ioc, fw_event); 6793 _scsih_sas_topology_change_event(ioc, fw_event);
6758 break; 6794 break;
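
The mpt2sas hunks above move the blocking SCSI Enclosure Processor request out of interrupt context: the SMART handler now only queues an MPT2SAS_TURN_ON_FAULT_LED event, and the firmware-event worker issues the request later. Below is a minimal userspace sketch of that defer-to-worker pattern (compile with -pthread); the queue, the thread and all names are illustrative stand-ins, not driver code.

/* Illustrative stand-ins only; none of these names come from the driver. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fault_led_work {
        unsigned short device_handle;   /* 0 doubles as a stop sentinel here */
        struct fault_led_work *next;
};

static struct fault_led_work *queue_head, *queue_tail;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_wake = PTHREAD_COND_INITIALIZER;

/* "Interrupt" side: may not sleep, so just allocate and queue the request. */
static void send_event_to_turn_on_fault_led(unsigned short handle)
{
        struct fault_led_work *w = malloc(sizeof(*w));

        if (!w)
                return;
        w->device_handle = handle;
        w->next = NULL;
        pthread_mutex_lock(&queue_lock);
        if (queue_tail)
                queue_tail->next = w;
        else
                queue_head = w;
        queue_tail = w;
        pthread_cond_signal(&queue_wake);
        pthread_mutex_unlock(&queue_lock);
}

/* Worker side: free to carry out the slow operation for each queued item. */
static void *fault_led_worker(void *arg)
{
        (void)arg;
        for (;;) {
                struct fault_led_work *w;

                pthread_mutex_lock(&queue_lock);
                while (!queue_head)
                        pthread_cond_wait(&queue_wake, &queue_lock);
                w = queue_head;
                queue_head = w->next;
                if (!queue_head)
                        queue_tail = NULL;
                pthread_mutex_unlock(&queue_lock);

                if (!w->device_handle) {        /* stop sentinel */
                        free(w);
                        return NULL;
                }
                printf("turning on fault LED for handle 0x%04x\n",
                       w->device_handle);
                free(w);
        }
}

int main(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, fault_led_worker, NULL);
        send_event_to_turn_on_fault_led(0x0009);        /* as if from the ISR */
        send_event_to_turn_on_fault_led(0);             /* ask the worker to stop */
        pthread_join(worker, NULL);
        return 0;
}
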
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 58f5be4740e9..de0b1a704fb5 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -4698,12 +4698,14 @@ static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
4698 break; 4698 break;
4699 4699
4700 if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) { 4700 if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
4701 int j;
4702
4701 STp->pos_unknown = 0; 4703 STp->pos_unknown = 0;
4702 STp->partition = STp->new_partition = 0; 4704 STp->partition = STp->new_partition = 0;
4703 if (STp->can_partitions) 4705 if (STp->can_partitions)
4704 STp->nbr_partitions = 1; /* This guess will be updated later if necessary */ 4706 STp->nbr_partitions = 1; /* This guess will be updated later if necessary */
4705 for (i=0; i < ST_NBR_PARTITIONS; i++) { 4707 for (j = 0; j < ST_NBR_PARTITIONS; j++) {
4706 STps = &(STp->ps[i]); 4708 STps = &(STp->ps[j]);
4707 STps->rw = ST_IDLE; 4709 STps->rw = ST_IDLE;
4708 STps->eof = ST_NOEOF; 4710 STps->eof = ST_NOEOF;
4709 STps->at_sm = 0; 4711 STps->at_sm = 0;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 7f636b118287..fca6a8953070 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4252,8 +4252,8 @@ static ssize_t pmcraid_show_drv_version(
4252 char *buf 4252 char *buf
4253) 4253)
4254{ 4254{
4255 return snprintf(buf, PAGE_SIZE, "version: %s, build date: %s\n", 4255 return snprintf(buf, PAGE_SIZE, "version: %s\n",
4256 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE); 4256 PMCRAID_DRIVER_VERSION);
4257} 4257}
4258 4258
4259static struct device_attribute pmcraid_driver_version_attr = { 4259static struct device_attribute pmcraid_driver_version_attr = {
@@ -6096,9 +6096,8 @@ static int __init pmcraid_init(void)
6096 dev_t dev; 6096 dev_t dev;
6097 int error; 6097 int error;
6098 6098
6099 pmcraid_info("%s Device Driver version: %s %s\n", 6099 pmcraid_info("%s Device Driver version: %s\n",
6100 PMCRAID_DRIVER_NAME, 6100 PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION);
6101 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
6102 6101
6103 error = alloc_chrdev_region(&dev, 0, 6102 error = alloc_chrdev_region(&dev, 0,
6104 PMCRAID_MAX_ADAPTERS, 6103 PMCRAID_MAX_ADAPTERS,
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 34e4c915002e..f920baf3ff24 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -43,7 +43,6 @@
43#define PMCRAID_DRIVER_NAME "PMC MaxRAID" 43#define PMCRAID_DRIVER_NAME "PMC MaxRAID"
44#define PMCRAID_DEVFILE "pmcsas" 44#define PMCRAID_DEVFILE "pmcsas"
45#define PMCRAID_DRIVER_VERSION "1.0.3" 45#define PMCRAID_DRIVER_VERSION "1.0.3"
46#define PMCRAID_DRIVER_DATE __DATE__
47 46
48#define PMCRAID_FW_VERSION_1 0x002 47#define PMCRAID_FW_VERSION_1 0x002
49 48
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 0339ff03a535..252523d7847e 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
1qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \ 1qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
2 ql4_nx.o ql4_nvram.o ql4_dbg.o 2 ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o
3 3
4obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o 4obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
5 5
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
new file mode 100644
index 000000000000..864d018631c0
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -0,0 +1,69 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11
12/* Scsi_Host attributes. */
13static ssize_t
14qla4xxx_fw_version_show(struct device *dev,
15 struct device_attribute *attr, char *buf)
16{
17 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
18
19 if (is_qla8022(ha))
20 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
21 ha->firmware_version[0],
22 ha->firmware_version[1],
23 ha->patch_number, ha->build_number);
24 else
25 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
26 ha->firmware_version[0],
27 ha->firmware_version[1],
28 ha->patch_number, ha->build_number);
29}
30
31static ssize_t
32qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
33 char *buf)
34{
35 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
36 return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
37}
38
39static ssize_t
40qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
41 char *buf)
42{
43 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
44 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
45 ha->iscsi_minor);
46}
47
48static ssize_t
49qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
50 char *buf)
51{
52 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
53 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
54 ha->bootload_major, ha->bootload_minor,
55 ha->bootload_patch, ha->bootload_build);
56}
57
58static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
59static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
60static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
61static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
62
63struct device_attribute *qla4xxx_host_attrs[] = {
64 &dev_attr_fw_version,
65 &dev_attr_serial_num,
66 &dev_attr_iscsi_version,
67 &dev_attr_optrom_version,
68 NULL,
69};
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 4757878d59dd..473c5c872b39 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -115,7 +115,7 @@
115#define INVALID_ENTRY 0xFFFF 115#define INVALID_ENTRY 0xFFFF
116#define MAX_CMDS_TO_RISC 1024 116#define MAX_CMDS_TO_RISC 1024
117#define MAX_SRBS MAX_CMDS_TO_RISC 117#define MAX_SRBS MAX_CMDS_TO_RISC
118#define MBOX_AEN_REG_COUNT 5 118#define MBOX_AEN_REG_COUNT 8
119#define MAX_INIT_RETRIES 5 119#define MAX_INIT_RETRIES 5
120 120
121/* 121/*
@@ -368,7 +368,6 @@ struct scsi_qla_host {
368#define AF_INIT_DONE 1 /* 0x00000002 */ 368#define AF_INIT_DONE 1 /* 0x00000002 */
369#define AF_MBOX_COMMAND 2 /* 0x00000004 */ 369#define AF_MBOX_COMMAND 2 /* 0x00000004 */
370#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */ 370#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
371#define AF_DPC_SCHEDULED 5 /* 0x00000020 */
372#define AF_INTERRUPTS_ON 6 /* 0x00000040 */ 371#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
373#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ 372#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
374#define AF_LINK_UP 8 /* 0x00000100 */ 373#define AF_LINK_UP 8 /* 0x00000100 */
@@ -584,6 +583,14 @@ struct scsi_qla_host {
584 uint32_t nx_reset_timeout; 583 uint32_t nx_reset_timeout;
585 584
586 struct completion mbx_intr_comp; 585 struct completion mbx_intr_comp;
586
587 /* --- From About Firmware --- */
588 uint16_t iscsi_major;
589 uint16_t iscsi_minor;
590 uint16_t bootload_major;
591 uint16_t bootload_minor;
592 uint16_t bootload_patch;
593 uint16_t bootload_build;
587}; 594};
588 595
589static inline int is_ipv4_enabled(struct scsi_qla_host *ha) 596static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 31e2bf97198c..01082aa77098 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -690,6 +690,29 @@ struct mbx_sys_info {
690 uint8_t reserved[12]; /* 34-3f */ 690 uint8_t reserved[12]; /* 34-3f */
691}; 691};
692 692
693struct about_fw_info {
694 uint16_t fw_major; /* 00 - 01 */
695 uint16_t fw_minor; /* 02 - 03 */
696 uint16_t fw_patch; /* 04 - 05 */
697 uint16_t fw_build; /* 06 - 07 */
698 uint8_t fw_build_date[16]; /* 08 - 17 ASCII String */
699 uint8_t fw_build_time[16]; /* 18 - 27 ASCII String */
700 uint8_t fw_build_user[16]; /* 28 - 37 ASCII String */
701 uint16_t fw_load_source; /* 38 - 39 */
702 /* 1 = Flash Primary,
703 2 = Flash Secondary,
704 3 = Host Download
705 */
706 uint8_t reserved1[6]; /* 3A - 3F */
707 uint16_t iscsi_major; /* 40 - 41 */
708 uint16_t iscsi_minor; /* 42 - 43 */
709 uint16_t bootload_major; /* 44 - 45 */
710 uint16_t bootload_minor; /* 46 - 47 */
711 uint16_t bootload_patch; /* 48 - 49 */
712 uint16_t bootload_build; /* 4A - 4B */
713 uint8_t reserved2[180]; /* 4C - FF */
714};
715
693struct crash_record { 716struct crash_record {
694 uint16_t fw_major_version; /* 00 - 01 */ 717 uint16_t fw_major_version; /* 00 - 01 */
695 uint16_t fw_minor_version; /* 02 - 03 */ 718 uint16_t fw_minor_version; /* 02 - 03 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index cc53e3fbd78c..a53a256c1f8d 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -61,7 +61,7 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
61int qla4xxx_add_sess(struct ddb_entry *); 61int qla4xxx_add_sess(struct ddb_entry *);
62void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry); 62void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
63int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha); 63int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
64int qla4xxx_get_fw_version(struct scsi_qla_host * ha); 64int qla4xxx_about_firmware(struct scsi_qla_host *ha);
65void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha, 65void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
66 uint32_t intr_status); 66 uint32_t intr_status);
67int qla4xxx_init_rings(struct scsi_qla_host *ha); 67int qla4xxx_init_rings(struct scsi_qla_host *ha);
@@ -139,4 +139,5 @@ extern int ql4xextended_error_logging;
139extern int ql4xdontresethba; 139extern int ql4xdontresethba;
140extern int ql4xenablemsix; 140extern int ql4xenablemsix;
141 141
142extern struct device_attribute *qla4xxx_host_attrs[];
142#endif /* _QLA4x_GBL_H */ 143#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 48e2241ddaf4..42ed5db2d530 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1275,7 +1275,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1275 if (ha->isp_ops->start_firmware(ha) == QLA_ERROR) 1275 if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
1276 goto exit_init_hba; 1276 goto exit_init_hba;
1277 1277
1278 if (qla4xxx_get_fw_version(ha) == QLA_ERROR) 1278 if (qla4xxx_about_firmware(ha) == QLA_ERROR)
1279 goto exit_init_hba; 1279 goto exit_init_hba;
1280 1280
1281 if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR) 1281 if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 2f40ac761cd4..0e72921c752d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -25,9 +25,14 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
25 25
26 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 26 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
27 sense_len = le16_to_cpu(sts_entry->senseDataByteCnt); 27 sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
28 if (sense_len == 0) 28 if (sense_len == 0) {
29 DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
30 " sense len 0\n", ha->host_no,
31 cmd->device->channel, cmd->device->id,
32 cmd->device->lun, __func__));
33 ha->status_srb = NULL;
29 return; 34 return;
30 35 }
31 /* Save total available sense length, 36 /* Save total available sense length,
32 * not to exceed cmd's sense buffer size */ 37 * not to exceed cmd's sense buffer size */
33 sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE); 38 sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
@@ -541,6 +546,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
541 case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */ 546 case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
542 case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR: 547 case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
543 case MBOX_ASTS_SUBNET_STATE_CHANGE: 548 case MBOX_ASTS_SUBNET_STATE_CHANGE:
549 case MBOX_ASTS_DUPLICATE_IP:
544 /* No action */ 550 /* No action */
545 DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no, 551 DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
546 mbox_status)); 552 mbox_status));
@@ -593,11 +599,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
593 mbox_sts[i]; 599 mbox_sts[i];
594 600
595 /* print debug message */ 601 /* print debug message */
596 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued" 602 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
597 " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n", 603 "mb1:0x%x mb2:0x%x mb3:0x%x "
598 ha->host_no, ha->aen_in, mbox_sts[0], 604 "mb4:0x%x mb5:0x%x\n",
599 mbox_sts[1], mbox_sts[2], mbox_sts[3], 605 ha->host_no, ha->aen_in,
600 mbox_sts[4])); 606 mbox_sts[0], mbox_sts[1],
607 mbox_sts[2], mbox_sts[3],
608 mbox_sts[4], mbox_sts[5]));
601 609
602 /* advance pointer */ 610 /* advance pointer */
603 ha->aen_in++; 611 ha->aen_in++;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index d78b58dc5011..fce8289e9752 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -86,22 +86,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
86 msleep(10); 86 msleep(10);
87 } 87 }
88 88
89 /* To prevent overwriting mailbox registers for a command that has
90 * not yet been serviced, check to see if an active command
91 * (AEN, IOCB, etc.) is interrupting, then service it.
92 * -----------------------------------------------------------------
93 */
94 spin_lock_irqsave(&ha->hardware_lock, flags); 89 spin_lock_irqsave(&ha->hardware_lock, flags);
95 90
96 if (!is_qla8022(ha)) {
97 intr_status = readl(&ha->reg->ctrl_status);
98 if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
99 /* Service existing interrupt */
100 ha->isp_ops->interrupt_service_routine(ha, intr_status);
101 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
102 }
103 }
104
105 ha->mbox_status_count = outCount; 91 ha->mbox_status_count = outCount;
106 for (i = 0; i < outCount; i++) 92 for (i = 0; i < outCount; i++)
107 ha->mbox_status[i] = 0; 93 ha->mbox_status[i] = 0;
@@ -1057,38 +1043,65 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
1057} 1043}
1058 1044
1059/** 1045/**
1060 * qla4xxx_get_fw_version - gets firmware version 1046 * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
1061 * @ha: Pointer to host adapter structure. 1047 * @ha: Pointer to host adapter structure.
1062 * 1048 *
1063 * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may 1049 * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
1064 * hold an address for data. Make sure that we write 0 to those mailboxes, 1050 * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
1065 * if unused. 1051 * those mailboxes, if unused.
1066 **/ 1052 **/
1067int qla4xxx_get_fw_version(struct scsi_qla_host * ha) 1053int qla4xxx_about_firmware(struct scsi_qla_host *ha)
1068{ 1054{
1055 struct about_fw_info *about_fw = NULL;
1056 dma_addr_t about_fw_dma;
1069 uint32_t mbox_cmd[MBOX_REG_COUNT]; 1057 uint32_t mbox_cmd[MBOX_REG_COUNT];
1070 uint32_t mbox_sts[MBOX_REG_COUNT]; 1058 uint32_t mbox_sts[MBOX_REG_COUNT];
1059 int status = QLA_ERROR;
1060
1061 about_fw = dma_alloc_coherent(&ha->pdev->dev,
1062 sizeof(struct about_fw_info),
1063 &about_fw_dma, GFP_KERNEL);
1064 if (!about_fw) {
1065 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
1066 "for about_fw\n", __func__));
1067 return status;
1068 }
1071 1069
1072 /* Get firmware version. */ 1070 memset(about_fw, 0, sizeof(struct about_fw_info));
1073 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 1071 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1074 memset(&mbox_sts, 0, sizeof(mbox_sts)); 1072 memset(&mbox_sts, 0, sizeof(mbox_sts));
1075 1073
1076 mbox_cmd[0] = MBOX_CMD_ABOUT_FW; 1074 mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
1077 1075 mbox_cmd[2] = LSDW(about_fw_dma);
1078 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) != 1076 mbox_cmd[3] = MSDW(about_fw_dma);
1079 QLA_SUCCESS) { 1077 mbox_cmd[4] = sizeof(struct about_fw_info);
1080 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ " 1078
1081 "status %04X\n", ha->host_no, __func__, mbox_sts[0])); 1079 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
1082 return QLA_ERROR; 1080 &mbox_cmd[0], &mbox_sts[0]);
1081 if (status != QLA_SUCCESS) {
1082 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
1083 "failed w/ status %04X\n", __func__,
1084 mbox_sts[0]));
1085 goto exit_about_fw;
1083 } 1086 }
1084 1087
1085 /* Save firmware version information. */ 1088 /* Save version information. */
1086 ha->firmware_version[0] = mbox_sts[1]; 1089 ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
1087 ha->firmware_version[1] = mbox_sts[2]; 1090 ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
1088 ha->patch_number = mbox_sts[3]; 1091 ha->patch_number = le16_to_cpu(about_fw->fw_patch);
1089 ha->build_number = mbox_sts[4]; 1092 ha->build_number = le16_to_cpu(about_fw->fw_build);
1093 ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
1094 ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
1095 ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
1096 ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
1097 ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
1098 ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
1099 status = QLA_SUCCESS;
1090 1100
1091 return QLA_SUCCESS; 1101exit_about_fw:
1102 dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
1103 about_fw, about_fw_dma);
1104 return status;
1092} 1105}
1093 1106
1094static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, 1107static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
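
The reworked qla4xxx_about_firmware() above reads its version words from a little-endian response buffer laid out as in the new struct about_fw_info. A small userspace sketch of that extraction follows, with get_le16() standing in for le16_to_cpu(); the sample bytes are made up.

/* Sample bytes are made up; the field order follows the about_fw_info hunk above. */
#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 16-bit read; userspace stand-in for le16_to_cpu(). */
static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        /* First 8 bytes of the response: fw_major, fw_minor, fw_patch, fw_build. */
        const uint8_t resp[8] = { 0x04, 0x00, 0x02, 0x00, 0x01, 0x00, 0x19, 0x00 };

        printf("firmware %d.%02d.%02d build %d\n",
               get_le16(&resp[0]), get_le16(&resp[2]),
               get_le16(&resp[4]), get_le16(&resp[6]));
        return 0;
}
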
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 03e522b2fe0b..fdfe27b38698 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -964,12 +964,26 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
964 /* Halt all the indiviual PEGs and other blocks of the ISP */ 964 /* Halt all the indiviual PEGs and other blocks of the ISP */
965 qla4_8xxx_rom_lock(ha); 965 qla4_8xxx_rom_lock(ha);
966 966
967 /* mask all niu interrupts */ 967 /* disable all I2Q */
968 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
969 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
970 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
971 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
972 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
973 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
974
975 /* disable all niu interrupts */
968 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); 976 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
969 /* disable xge rx/tx */ 977 /* disable xge rx/tx */
970 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); 978 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
971 /* disable xg1 rx/tx */ 979 /* disable xg1 rx/tx */
972 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); 980 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
981 /* disable sideband mac */
982 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
983 /* disable ap0 mac */
984 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
985 /* disable ap1 mac */
986 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
973 987
974 /* halt sre */ 988 /* halt sre */
975 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); 989 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
@@ -984,6 +998,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
984 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); 998 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
985 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); 999 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
986 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); 1000 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1001 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
987 1002
988 /* halt pegs */ 1003 /* halt pegs */
989 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); 1004 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
@@ -991,9 +1006,9 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
991 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); 1006 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
992 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); 1007 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
993 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); 1008 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1009 msleep(5);
994 1010
995 /* big hammer */ 1011 /* big hammer */
996 msleep(1000);
997 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 1012 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
998 /* don't reset CAM block on reset */ 1013 /* don't reset CAM block on reset */
999 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); 1014 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c22f2a764d9d..f2364ec59f03 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -124,6 +124,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
124 .sg_tablesize = SG_ALL, 124 .sg_tablesize = SG_ALL,
125 125
126 .max_sectors = 0xFFFF, 126 .max_sectors = 0xFFFF,
127 .shost_attrs = qla4xxx_host_attrs,
127}; 128};
128 129
129static struct iscsi_transport qla4xxx_iscsi_transport = { 130static struct iscsi_transport qla4xxx_iscsi_transport = {
@@ -412,8 +413,7 @@ void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
412 413
413static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, 414static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
414 struct ddb_entry *ddb_entry, 415 struct ddb_entry *ddb_entry,
415 struct scsi_cmnd *cmd, 416 struct scsi_cmnd *cmd)
416 void (*done)(struct scsi_cmnd *))
417{ 417{
418 struct srb *srb; 418 struct srb *srb;
419 419
@@ -427,7 +427,6 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
427 srb->cmd = cmd; 427 srb->cmd = cmd;
428 srb->flags = 0; 428 srb->flags = 0;
429 CMD_SP(cmd) = (void *)srb; 429 CMD_SP(cmd) = (void *)srb;
430 cmd->scsi_done = done;
431 430
432 return srb; 431 return srb;
433} 432}
@@ -458,9 +457,8 @@ void qla4xxx_srb_compl(struct kref *ref)
458 457
459/** 458/**
460 * qla4xxx_queuecommand - scsi layer issues scsi command to driver. 459 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
460 * @host: scsi host
461 * @cmd: Pointer to Linux's SCSI command structure 461 * @cmd: Pointer to Linux's SCSI command structure
462 * @done_fn: Function that the driver calls to notify the SCSI mid-layer
463 * that the command has been processed.
464 * 462 *
465 * Remarks: 463 * Remarks:
466 * This routine is invoked by Linux to send a SCSI command to the driver. 464 * This routine is invoked by Linux to send a SCSI command to the driver.
@@ -470,10 +468,9 @@ void qla4xxx_srb_compl(struct kref *ref)
470 * completion handling). Unfortunely, it sometimes calls the scheduler 468 * completion handling). Unfortunely, it sometimes calls the scheduler
471 * in interrupt context which is a big NO! NO!. 469 * in interrupt context which is a big NO! NO!.
472 **/ 470 **/
473static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd, 471static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
474 void (*done)(struct scsi_cmnd *))
475{ 472{
476 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 473 struct scsi_qla_host *ha = to_qla_host(host);
477 struct ddb_entry *ddb_entry = cmd->device->hostdata; 474 struct ddb_entry *ddb_entry = cmd->device->hostdata;
478 struct iscsi_cls_session *sess = ddb_entry->sess; 475 struct iscsi_cls_session *sess = ddb_entry->sess;
479 struct srb *srb; 476 struct srb *srb;
@@ -515,37 +512,29 @@ static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
515 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 512 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
516 goto qc_host_busy; 513 goto qc_host_busy;
517 514
518 spin_unlock_irq(ha->host->host_lock); 515 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
519
520 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
521 if (!srb) 516 if (!srb)
522 goto qc_host_busy_lock; 517 goto qc_host_busy;
523 518
524 rval = qla4xxx_send_command_to_isp(ha, srb); 519 rval = qla4xxx_send_command_to_isp(ha, srb);
525 if (rval != QLA_SUCCESS) 520 if (rval != QLA_SUCCESS)
526 goto qc_host_busy_free_sp; 521 goto qc_host_busy_free_sp;
527 522
528 spin_lock_irq(ha->host->host_lock);
529 return 0; 523 return 0;
530 524
531qc_host_busy_free_sp: 525qc_host_busy_free_sp:
532 qla4xxx_srb_free_dma(ha, srb); 526 qla4xxx_srb_free_dma(ha, srb);
533 mempool_free(srb, ha->srb_mempool); 527 mempool_free(srb, ha->srb_mempool);
534 528
535qc_host_busy_lock:
536 spin_lock_irq(ha->host->host_lock);
537
538qc_host_busy: 529qc_host_busy:
539 return SCSI_MLQUEUE_HOST_BUSY; 530 return SCSI_MLQUEUE_HOST_BUSY;
540 531
541qc_fail_command: 532qc_fail_command:
542 done(cmd); 533 cmd->scsi_done(cmd);
543 534
544 return 0; 535 return 0;
545} 536}
546 537
547static DEF_SCSI_QCMD(qla4xxx_queuecommand)
548
549/** 538/**
550 * qla4xxx_mem_free - frees memory allocated to adapter 539 * qla4xxx_mem_free - frees memory allocated to adapter
551 * @ha: Pointer to host adapter structure. 540 * @ha: Pointer to host adapter structure.
@@ -679,7 +668,27 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
679 if (ha->seconds_since_last_heartbeat == 2) { 668 if (ha->seconds_since_last_heartbeat == 2) {
680 ha->seconds_since_last_heartbeat = 0; 669 ha->seconds_since_last_heartbeat = 0;
681 halt_status = qla4_8xxx_rd_32(ha, 670 halt_status = qla4_8xxx_rd_32(ha,
682 QLA82XX_PEG_HALT_STATUS1); 671 QLA82XX_PEG_HALT_STATUS1);
672
673 ql4_printk(KERN_INFO, ha,
674 "scsi(%ld): %s, Dumping hw/fw registers:\n "
675 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
676 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
677 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
678 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
679 ha->host_no, __func__, halt_status,
680 qla4_8xxx_rd_32(ha,
681 QLA82XX_PEG_HALT_STATUS2),
682 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
683 0x3c),
684 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
685 0x3c),
686 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
687 0x3c),
688 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
689 0x3c),
690 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
691 0x3c));
683 692
684 /* Since we cannot change dev_state in interrupt 693 /* Since we cannot change dev_state in interrupt
685 * context, set appropriate DPC flag then wakeup 694 * context, set appropriate DPC flag then wakeup
@@ -715,7 +724,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
715 /* don't poll if reset is going on */ 724 /* don't poll if reset is going on */
716 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 725 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
717 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 726 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
718 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) { 727 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
719 if (dev_state == QLA82XX_DEV_NEED_RESET && 728 if (dev_state == QLA82XX_DEV_NEED_RESET &&
720 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 729 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
721 if (!ql4xdontresethba) { 730 if (!ql4xdontresethba) {
@@ -839,7 +848,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
839 } 848 }
840 849
841 /* Wakeup the dpc routine for this adapter, if needed. */ 850 /* Wakeup the dpc routine for this adapter, if needed. */
842 if ((start_dpc || 851 if (start_dpc ||
843 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 852 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
844 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || 853 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
845 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || 854 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
@@ -849,9 +858,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
849 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || 858 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
850 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || 859 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
851 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 860 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
852 test_bit(DPC_AEN, &ha->dpc_flags)) && 861 test_bit(DPC_AEN, &ha->dpc_flags)) {
853 !test_bit(AF_DPC_SCHEDULED, &ha->flags) &&
854 ha->dpc_thread) {
855 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 862 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
856 " - dpc flags = 0x%lx\n", 863 " - dpc flags = 0x%lx\n",
857 ha->host_no, __func__, ha->dpc_flags)); 864 ha->host_no, __func__, ha->dpc_flags));
@@ -1241,11 +1248,8 @@ static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
1241 1248
1242void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 1249void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
1243{ 1250{
1244 if (ha->dpc_thread && 1251 if (ha->dpc_thread)
1245 !test_bit(AF_DPC_SCHEDULED, &ha->flags)) {
1246 set_bit(AF_DPC_SCHEDULED, &ha->flags);
1247 queue_work(ha->dpc_thread, &ha->dpc_work); 1252 queue_work(ha->dpc_thread, &ha->dpc_work);
1248 }
1249} 1253}
1250 1254
1251/** 1255/**
@@ -1272,12 +1276,12 @@ static void qla4xxx_do_dpc(struct work_struct *work)
1272 1276
1273 /* Initialization not yet finished. Don't do anything yet. */ 1277 /* Initialization not yet finished. Don't do anything yet. */
1274 if (!test_bit(AF_INIT_DONE, &ha->flags)) 1278 if (!test_bit(AF_INIT_DONE, &ha->flags))
1275 goto do_dpc_exit; 1279 return;
1276 1280
1277 if (test_bit(AF_EEH_BUSY, &ha->flags)) { 1281 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1278 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", 1282 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
1279 ha->host_no, __func__, ha->flags)); 1283 ha->host_no, __func__, ha->flags));
1280 goto do_dpc_exit; 1284 return;
1281 } 1285 }
1282 1286
1283 if (is_qla8022(ha)) { 1287 if (is_qla8022(ha)) {
@@ -1384,8 +1388,6 @@ dpc_post_reset_ha:
1384 } 1388 }
1385 } 1389 }
1386 1390
1387do_dpc_exit:
1388 clear_bit(AF_DPC_SCHEDULED, &ha->flags);
1389} 1391}
1390 1392
1391/** 1393/**
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 603155769407..610492877253 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k6" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k7"
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index abea2cf05c2e..a4b9cdbaaa0b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,6 +50,8 @@
50#define BUS_RESET_SETTLE_TIME (10) 50#define BUS_RESET_SETTLE_TIME (10)
51#define HOST_RESET_SETTLE_TIME (10) 51#define HOST_RESET_SETTLE_TIME (10)
52 52
53static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
54
53/* called with shost->host_lock held */ 55/* called with shost->host_lock held */
54void scsi_eh_wakeup(struct Scsi_Host *shost) 56void scsi_eh_wakeup(struct Scsi_Host *shost)
55{ 57{
@@ -947,6 +949,48 @@ retry_tur:
947} 949}
948 950
949/** 951/**
952 * scsi_eh_test_devices - check if devices are responding from error recovery.
953 * @cmd_list: scsi commands in error recovery.
954 * @work_q: queue for commands which still need more error recovery
955 * @done_q: queue for commands which are finished
956 * @try_stu: boolean on if a STU command should be tried in addition to TUR.
957 *
 958 * Description:
959 * Tests if devices are in a working state. Commands to devices now in
960 * a working state are sent to the done_q while commands to devices which
961 * are still failing to respond are returned to the work_q for more
962 * processing.
963 **/
964static int scsi_eh_test_devices(struct list_head *cmd_list,
965 struct list_head *work_q,
966 struct list_head *done_q, int try_stu)
967{
968 struct scsi_cmnd *scmd, *next;
969 struct scsi_device *sdev;
970 int finish_cmds;
971
972 while (!list_empty(cmd_list)) {
973 scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
974 sdev = scmd->device;
975
976 finish_cmds = !scsi_device_online(scmd->device) ||
977 (try_stu && !scsi_eh_try_stu(scmd) &&
978 !scsi_eh_tur(scmd)) ||
979 !scsi_eh_tur(scmd);
980
981 list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
982 if (scmd->device == sdev) {
983 if (finish_cmds)
984 scsi_eh_finish_cmd(scmd, done_q);
985 else
986 list_move_tail(&scmd->eh_entry, work_q);
987 }
988 }
989 return list_empty(work_q);
990}
991
992
993/**
950 * scsi_eh_abort_cmds - abort pending commands. 994 * scsi_eh_abort_cmds - abort pending commands.
951 * @work_q: &list_head for pending commands. 995 * @work_q: &list_head for pending commands.
952 * @done_q: &list_head for processed commands. 996 * @done_q: &list_head for processed commands.
@@ -962,6 +1006,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
962 struct list_head *done_q) 1006 struct list_head *done_q)
963{ 1007{
964 struct scsi_cmnd *scmd, *next; 1008 struct scsi_cmnd *scmd, *next;
1009 LIST_HEAD(check_list);
965 int rtn; 1010 int rtn;
966 1011
967 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1012 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
@@ -973,11 +1018,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
973 rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd); 1018 rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
974 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 1019 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
975 scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; 1020 scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
976 if (!scsi_device_online(scmd->device) || 1021 if (rtn == FAST_IO_FAIL)
977 rtn == FAST_IO_FAIL ||
978 !scsi_eh_tur(scmd)) {
979 scsi_eh_finish_cmd(scmd, done_q); 1022 scsi_eh_finish_cmd(scmd, done_q);
980 } 1023 else
1024 list_move_tail(&scmd->eh_entry, &check_list);
981 } else 1025 } else
982 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting" 1026 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
983 " cmd failed:" 1027 " cmd failed:"
@@ -986,7 +1030,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
986 scmd)); 1030 scmd));
987 } 1031 }
988 1032
989 return list_empty(work_q); 1033 return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
990} 1034}
991 1035
992/** 1036/**
@@ -1137,6 +1181,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1137 struct list_head *done_q) 1181 struct list_head *done_q)
1138{ 1182{
1139 LIST_HEAD(tmp_list); 1183 LIST_HEAD(tmp_list);
1184 LIST_HEAD(check_list);
1140 1185
1141 list_splice_init(work_q, &tmp_list); 1186 list_splice_init(work_q, &tmp_list);
1142 1187
@@ -1161,9 +1206,9 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1161 if (scmd_id(scmd) != id) 1206 if (scmd_id(scmd) != id)
1162 continue; 1207 continue;
1163 1208
1164 if ((rtn == SUCCESS || rtn == FAST_IO_FAIL) 1209 if (rtn == SUCCESS)
1165 && (!scsi_device_online(scmd->device) || 1210 list_move_tail(&scmd->eh_entry, &check_list);
1166 rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd))) 1211 else if (rtn == FAST_IO_FAIL)
1167 scsi_eh_finish_cmd(scmd, done_q); 1212 scsi_eh_finish_cmd(scmd, done_q);
1168 else 1213 else
1169 /* push back on work queue for further processing */ 1214 /* push back on work queue for further processing */
@@ -1171,7 +1216,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1171 } 1216 }
1172 } 1217 }
1173 1218
1174 return list_empty(work_q); 1219 return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1175} 1220}
1176 1221
1177/** 1222/**
@@ -1185,6 +1230,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1185 struct list_head *done_q) 1230 struct list_head *done_q)
1186{ 1231{
1187 struct scsi_cmnd *scmd, *chan_scmd, *next; 1232 struct scsi_cmnd *scmd, *chan_scmd, *next;
1233 LIST_HEAD(check_list);
1188 unsigned int channel; 1234 unsigned int channel;
1189 int rtn; 1235 int rtn;
1190 1236
@@ -1216,12 +1262,14 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1216 rtn = scsi_try_bus_reset(chan_scmd); 1262 rtn = scsi_try_bus_reset(chan_scmd);
1217 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 1263 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1218 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1264 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1219 if (channel == scmd_channel(scmd)) 1265 if (channel == scmd_channel(scmd)) {
1220 if (!scsi_device_online(scmd->device) || 1266 if (rtn == FAST_IO_FAIL)
1221 rtn == FAST_IO_FAIL ||
1222 !scsi_eh_tur(scmd))
1223 scsi_eh_finish_cmd(scmd, 1267 scsi_eh_finish_cmd(scmd,
1224 done_q); 1268 done_q);
1269 else
1270 list_move_tail(&scmd->eh_entry,
1271 &check_list);
1272 }
1225 } 1273 }
1226 } else { 1274 } else {
1227 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST" 1275 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
@@ -1230,7 +1278,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1230 channel)); 1278 channel));
1231 } 1279 }
1232 } 1280 }
1233 return list_empty(work_q); 1281 return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1234} 1282}
1235 1283
1236/** 1284/**
@@ -1242,6 +1290,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
1242 struct list_head *done_q) 1290 struct list_head *done_q)
1243{ 1291{
1244 struct scsi_cmnd *scmd, *next; 1292 struct scsi_cmnd *scmd, *next;
1293 LIST_HEAD(check_list);
1245 int rtn; 1294 int rtn;
1246 1295
1247 if (!list_empty(work_q)) { 1296 if (!list_empty(work_q)) {
@@ -1252,12 +1301,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
1252 , current->comm)); 1301 , current->comm));
1253 1302
1254 rtn = scsi_try_host_reset(scmd); 1303 rtn = scsi_try_host_reset(scmd);
1255 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 1304 if (rtn == SUCCESS) {
1305 list_splice_init(work_q, &check_list);
1306 } else if (rtn == FAST_IO_FAIL) {
1256 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1307 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1257 if (!scsi_device_online(scmd->device) ||
1258 rtn == FAST_IO_FAIL ||
1259 (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
1260 !scsi_eh_tur(scmd))
1261 scsi_eh_finish_cmd(scmd, done_q); 1308 scsi_eh_finish_cmd(scmd, done_q);
1262 } 1309 }
1263 } else { 1310 } else {
@@ -1266,7 +1313,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
1266 current->comm)); 1313 current->comm));
1267 } 1314 }
1268 } 1315 }
1269 return list_empty(work_q); 1316 return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
1270} 1317}
1271 1318
1272/** 1319/**
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index b587289cfacb..2bea4f0b684a 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -59,6 +59,10 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
59 trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", 59 trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
60 (unsigned long long)lba, (unsigned long long)txlen, 60 (unsigned long long)lba, (unsigned long long)txlen,
61 cdb[1] >> 5); 61 cdb[1] >> 5);
62
63 if (cdb[0] == WRITE_SAME)
64 trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
65
62 trace_seq_putc(p, 0); 66 trace_seq_putc(p, 0);
63 67
64 return ret; 68 return ret;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bd0806e64e85..953773cb26d9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -490,7 +490,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
490 unsigned int max_blocks = 0; 490 unsigned int max_blocks = 0;
491 491
492 q->limits.discard_zeroes_data = sdkp->lbprz; 492 q->limits.discard_zeroes_data = sdkp->lbprz;
493 q->limits.discard_alignment = sdkp->unmap_alignment; 493 q->limits.discard_alignment = sdkp->unmap_alignment *
494 logical_block_size;
494 q->limits.discard_granularity = 495 q->limits.discard_granularity =
495 max(sdkp->physical_block_size, 496 max(sdkp->physical_block_size,
496 sdkp->unmap_granularity * logical_block_size); 497 sdkp->unmap_granularity * logical_block_size);
@@ -2021,16 +2022,26 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2021 2022
2022 int dbd; 2023 int dbd;
2023 int modepage; 2024 int modepage;
2025 int first_len;
2024 struct scsi_mode_data data; 2026 struct scsi_mode_data data;
2025 struct scsi_sense_hdr sshdr; 2027 struct scsi_sense_hdr sshdr;
2026 int old_wce = sdkp->WCE; 2028 int old_wce = sdkp->WCE;
2027 int old_rcd = sdkp->RCD; 2029 int old_rcd = sdkp->RCD;
2028 int old_dpofua = sdkp->DPOFUA; 2030 int old_dpofua = sdkp->DPOFUA;
2029 2031
2030 if (sdp->skip_ms_page_8) 2032 first_len = 4;
2031 goto defaults; 2033 if (sdp->skip_ms_page_8) {
2032 2034 if (sdp->type == TYPE_RBC)
2033 if (sdp->type == TYPE_RBC) { 2035 goto defaults;
2036 else {
2037 if (sdp->skip_ms_page_3f)
2038 goto defaults;
2039 modepage = 0x3F;
2040 if (sdp->use_192_bytes_for_3f)
2041 first_len = 192;
2042 dbd = 0;
2043 }
2044 } else if (sdp->type == TYPE_RBC) {
2034 modepage = 6; 2045 modepage = 6;
2035 dbd = 8; 2046 dbd = 8;
2036 } else { 2047 } else {
@@ -2039,13 +2050,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2039 } 2050 }
2040 2051
2041 /* cautiously ask */ 2052 /* cautiously ask */
2042 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr); 2053 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
2054 &data, &sshdr);
2043 2055
2044 if (!scsi_status_is_good(res)) 2056 if (!scsi_status_is_good(res))
2045 goto bad_sense; 2057 goto bad_sense;
2046 2058
2047 if (!data.header_length) { 2059 if (!data.header_length) {
2048 modepage = 6; 2060 modepage = 6;
2061 first_len = 0;
2049 sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n"); 2062 sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
2050 } 2063 }
2051 2064
@@ -2058,30 +2071,61 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2058 */ 2071 */
2059 if (len < 3) 2072 if (len < 3)
2060 goto bad_sense; 2073 goto bad_sense;
2061 if (len > 20) 2074 else if (len > SD_BUF_SIZE) {
2062 len = 20; 2075 sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
2063 2076 "data from %d to %d bytes\n", len, SD_BUF_SIZE);
2064 /* Take headers and block descriptors into account */ 2077 len = SD_BUF_SIZE;
2065 len += data.header_length + data.block_descriptor_length; 2078 }
2066 if (len > SD_BUF_SIZE) 2079 if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
2067 goto bad_sense; 2080 len = 192;
2068 2081
2069 /* Get the data */ 2082 /* Get the data */
2070 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); 2083 if (len > first_len)
2084 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
2085 &data, &sshdr);
2071 2086
2072 if (scsi_status_is_good(res)) { 2087 if (scsi_status_is_good(res)) {
2073 int offset = data.header_length + data.block_descriptor_length; 2088 int offset = data.header_length + data.block_descriptor_length;
2074 2089
2075 if (offset >= SD_BUF_SIZE - 2) { 2090 while (offset < len) {
2076 sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n"); 2091 u8 page_code = buffer[offset] & 0x3F;
2077 goto defaults; 2092 u8 spf = buffer[offset] & 0x40;
2093
2094 if (page_code == 8 || page_code == 6) {
2095 /* We're interested only in the first 3 bytes.
2096 */
2097 if (len - offset <= 2) {
2098 sd_printk(KERN_ERR, sdkp, "Incomplete "
2099 "mode parameter data\n");
2100 goto defaults;
2101 } else {
2102 modepage = page_code;
2103 goto Page_found;
2104 }
2105 } else {
2106 /* Go to the next page */
2107 if (spf && len - offset > 3)
2108 offset += 4 + (buffer[offset+2] << 8) +
2109 buffer[offset+3];
2110 else if (!spf && len - offset > 1)
2111 offset += 2 + buffer[offset+1];
2112 else {
2113 sd_printk(KERN_ERR, sdkp, "Incomplete "
2114 "mode parameter data\n");
2115 goto defaults;
2116 }
2117 }
2078 } 2118 }
2079 2119
2080 if ((buffer[offset] & 0x3f) != modepage) { 2120 if (modepage == 0x3F) {
2121 sd_printk(KERN_ERR, sdkp, "No Caching mode page "
2122 "present\n");
2123 goto defaults;
2124 } else if ((buffer[offset] & 0x3f) != modepage) {
2081 sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); 2125 sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
2082 goto defaults; 2126 goto defaults;
2083 } 2127 }
2084 2128 Page_found:
2085 if (modepage == 8) { 2129 if (modepage == 8) {
2086 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 2130 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
2087 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); 2131 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
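
The sd_read_cache_type() rework above walks the mode parameter data page by page, using the SPF bit to pick the right length field, until it finds the caching page (8) or the RBC device parameters page (6). A userspace sketch of that walk over a hand-made buffer follows; the caching page is shortened and the driver's bounds checks are omitted for brevity.

/* Hand-made buffer; a real caching mode page is longer than the one shown here. */
#include <stdio.h>

int main(void)
{
        /* Two pages: a vendor page 0x1C, then a shortened caching page 8. */
        unsigned char buf[] = {
                0x1c, 0x02, 0x00, 0x00,         /* page 0x1C, page length 2    */
                0x08, 0x03, 0x04, 0x00, 0x00    /* page 0x08, length 3, WCE on */
        };
        int len = sizeof(buf);
        int offset = 0;

        while (offset < len) {
                unsigned char page_code = buf[offset] & 0x3f;
                unsigned char spf = buf[offset] & 0x40;

                if (page_code == 8 || page_code == 6) {
                        printf("caching page at offset %d: WCE=%d RCD=%d\n",
                               offset,
                               (buf[offset + 2] & 0x04) != 0,
                               (buf[offset + 2] & 0x01) != 0);
                        return 0;
                }
                /* SPF pages keep a 16-bit length in bytes 2-3, others in byte 1. */
                if (spf)
                        offset += 4 + (buf[offset + 2] << 8) + buf[offset + 3];
                else
                        offset += 2 + buf[offset + 1];
        }
        printf("no caching mode page present\n");
        return 0;
}
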
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 9f4b58b7daad..7e22b737dfd8 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -307,7 +307,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
307 "0: bsfw %1,%w0\n\t" 307 "0: bsfw %1,%w0\n\t"
308 "btr %0,%1\n\t" 308 "btr %0,%1\n\t"
309 "jnc 0b" 309 "jnc 0b"
310 : "=&r" (rv), "=m" (*field) :); 310 : "=&r" (rv), "+m" (*field) :);
311 311
312 return rv; 312 return rv;
313} 313}
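
The ultrastor constraint fix above matters because the asm both reads and clears a bit in *field, so the memory operand must be declared read-write ("+m") rather than write-only ("=m"). What the routine computes, in portable C (note the asm spins until some bit is set, while this sketch simply returns -1):

/* Portable illustration only; the inline asm above loops until a bit is found. */
#include <stdio.h>

static int find_and_clear_bit_16(unsigned short *field)
{
        int bit;

        for (bit = 0; bit < 16; bit++) {
                if (*field & (1u << bit)) {
                        *field &= ~(1u << bit); /* clear it, as btr does */
                        return bit;
                }
        }
        return -1;                              /* nothing set (sketch only) */
}

int main(void)
{
        unsigned short mask = 0x0028;           /* bits 3 and 5 set */

        printf("%d\n", find_and_clear_bit_16(&mask));   /* prints 3 */
        printf("%d\n", find_and_clear_bit_16(&mask));   /* prints 5 */
        return 0;
}
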
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 97ae716134d0..c0ee4ea28a19 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -2051,8 +2051,7 @@ wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs,
2051 for (i = 0; i < MAX_SETUP_ARGS; i++) 2051 for (i = 0; i < MAX_SETUP_ARGS; i++)
2052 printk("%s,", setup_args[i]); 2052 printk("%s,", setup_args[i]);
2053 printk("\n"); 2053 printk("\n");
2054 printk(" Version %s - %s, Compiled %s at %s\n", 2054 printk(" Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE);
2055 WD33C93_VERSION, WD33C93_DATE, __DATE__, __TIME__);
2056} 2055}
2057 2056
2058int 2057int
@@ -2132,8 +2131,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
2132 bp = buf; 2131 bp = buf;
2133 *bp = '\0'; 2132 *bp = '\0';
2134 if (hd->proc & PR_VERSION) { 2133 if (hd->proc & PR_VERSION) {
2135 sprintf(tbuf, "\nVersion %s - %s. Compiled %s %s", 2134 sprintf(tbuf, "\nVersion %s - %s.",
2136 WD33C93_VERSION, WD33C93_DATE, __DATE__, __TIME__); 2135 WD33C93_VERSION, WD33C93_DATE);
2137 strcat(bp, tbuf); 2136 strcat(bp, tbuf);
2138 } 2137 }
2139 if (hd->proc & PR_INFO) { 2138 if (hd->proc & PR_INFO) {
diff --git a/drivers/staging/generic_serial/rio/rioinit.c b/drivers/staging/generic_serial/rio/rioinit.c
index 24a282bb89d4..fb62b383f1de 100644
--- a/drivers/staging/generic_serial/rio/rioinit.c
+++ b/drivers/staging/generic_serial/rio/rioinit.c
@@ -381,7 +381,7 @@ struct rioVersion *RIOVersid(void)
381{ 381{
382 strlcpy(stVersion.version, "RIO driver for linux V1.0", 382 strlcpy(stVersion.version, "RIO driver for linux V1.0",
383 sizeof(stVersion.version)); 383 sizeof(stVersion.version));
384 strlcpy(stVersion.buildDate, __DATE__, 384 strlcpy(stVersion.buildDate, "Aug 15 2010",
385 sizeof(stVersion.buildDate)); 385 sizeof(stVersion.buildDate));
386 386
387 return &stVersion; 387 return &stVersion;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index aed4e464d31c..dee2a2c909f5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,7 @@
31#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
32#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
33#include <scsi/scsi_cmnd.h> 33#include <scsi/scsi_cmnd.h>
34#include <scsi/libsas.h> /* For TASK_ATTR_* */ 34#include <scsi/scsi_tcq.h>
35 35
36#include <target/target_core_base.h> 36#include <target/target_core_base.h>
37#include <target/target_core_transport.h> 37#include <target/target_core_transport.h>
@@ -95,17 +95,17 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
95 if (sc->device->tagged_supported) { 95 if (sc->device->tagged_supported) {
96 switch (sc->tag) { 96 switch (sc->tag) {
97 case HEAD_OF_QUEUE_TAG: 97 case HEAD_OF_QUEUE_TAG:
98 sam_task_attr = TASK_ATTR_HOQ; 98 sam_task_attr = MSG_HEAD_TAG;
99 break; 99 break;
100 case ORDERED_QUEUE_TAG: 100 case ORDERED_QUEUE_TAG:
101 sam_task_attr = TASK_ATTR_ORDERED; 101 sam_task_attr = MSG_ORDERED_TAG;
102 break; 102 break;
103 default: 103 default:
104 sam_task_attr = TASK_ATTR_SIMPLE; 104 sam_task_attr = MSG_SIMPLE_TAG;
105 break; 105 break;
106 } 106 }
107 } else 107 } else
108 sam_task_attr = TASK_ATTR_SIMPLE; 108 sam_task_attr = MSG_SIMPLE_TAG;
109 109
110 /* 110 /*
111 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 111 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
@@ -379,7 +379,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
379 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 379 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
380 */ 380 */
381 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, 381 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
382 DMA_NONE, TASK_ATTR_SIMPLE, 382 DMA_NONE, MSG_SIMPLE_TAG,
383 &tl_cmd->tl_sense_buf[0]); 383 &tl_cmd->tl_sense_buf[0]);
384 /* 384 /*
385 * Allocate the LUN_RESET TMR 385 * Allocate the LUN_RESET TMR
@@ -939,18 +939,6 @@ static u16 tcm_loop_get_fabric_sense_len(void)
939 return 0; 939 return 0;
940} 940}
941 941
942static u64 tcm_loop_pack_lun(unsigned int lun)
943{
944 u64 result;
945
946 /* LSB of lun into byte 1 big-endian */
947 result = ((lun & 0xff) << 8);
948 /* use flat space addressing method */
949 result |= 0x40 | ((lun >> 8) & 0x3f);
950
951 return cpu_to_le64(result);
952}
953
954static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) 942static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
955{ 943{
956 switch (tl_hba->tl_proto_id) { 944 switch (tl_hba->tl_proto_id) {
@@ -1481,7 +1469,6 @@ static int tcm_loop_register_configfs(void)
1481 fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len; 1469 fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
1482 fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len; 1470 fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
1483 fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove; 1471 fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
1484 fabric->tf_ops.pack_lun = &tcm_loop_pack_lun;
1485 1472
1486 tf_cg = &fabric->tf_group; 1473 tf_cg = &fabric->tf_group;
1487 /* 1474 /*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f44a6e6e1d..ee6fad979b50 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -497,10 +497,6 @@ static int target_fabric_tf_ops_check(
497 printk(KERN_ERR "Missing tfo->is_state_remove()\n"); 497 printk(KERN_ERR "Missing tfo->is_state_remove()\n");
498 return -EINVAL; 498 return -EINVAL;
499 } 499 }
500 if (!(tfo->pack_lun)) {
501 printk(KERN_ERR "Missing tfo->pack_lun()\n");
502 return -EINVAL;
503 }
504 /* 500 /*
505 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() 501 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
506 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in 502 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d25e20829012..8407f9ca2b31 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -38,6 +38,7 @@
38#include <net/sock.h> 38#include <net/sock.h>
39#include <net/tcp.h> 39#include <net/tcp.h>
40#include <scsi/scsi.h> 40#include <scsi/scsi.h>
41#include <scsi/scsi_device.h>
41 42
42#include <target/target_core_base.h> 43#include <target/target_core_base.h>
43#include <target/target_core_device.h> 44#include <target/target_core_device.h>
@@ -150,13 +151,13 @@ out:
150 151
151 { 152 {
152 struct se_device *dev = se_lun->lun_se_dev; 153 struct se_device *dev = se_lun->lun_se_dev;
153 spin_lock(&dev->stats_lock); 154 spin_lock_irq(&dev->stats_lock);
154 dev->num_cmds++; 155 dev->num_cmds++;
155 if (se_cmd->data_direction == DMA_TO_DEVICE) 156 if (se_cmd->data_direction == DMA_TO_DEVICE)
156 dev->write_bytes += se_cmd->data_length; 157 dev->write_bytes += se_cmd->data_length;
157 else if (se_cmd->data_direction == DMA_FROM_DEVICE) 158 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
158 dev->read_bytes += se_cmd->data_length; 159 dev->read_bytes += se_cmd->data_length;
159 spin_unlock(&dev->stats_lock); 160 spin_unlock_irq(&dev->stats_lock);
160 } 161 }
161 162
162 /* 163 /*
@@ -658,8 +659,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
658 struct se_session *se_sess = SE_SESS(se_cmd); 659 struct se_session *se_sess = SE_SESS(se_cmd);
659 struct se_task *se_task; 660 struct se_task *se_task;
660 unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; 661 unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
661 u32 cdb_offset = 0, lun_count = 0, offset = 8; 662 u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
662 u64 i, lun;
663 663
664 list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) 664 list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
665 break; 665 break;
@@ -675,15 +675,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
675 * a $FABRIC_MOD. In that case, report LUN=0 only. 675 * a $FABRIC_MOD. In that case, report LUN=0 only.
676 */ 676 */
677 if (!(se_sess)) { 677 if (!(se_sess)) {
678 lun = 0; 678 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
679 buf[offset++] = ((lun >> 56) & 0xff);
680 buf[offset++] = ((lun >> 48) & 0xff);
681 buf[offset++] = ((lun >> 40) & 0xff);
682 buf[offset++] = ((lun >> 32) & 0xff);
683 buf[offset++] = ((lun >> 24) & 0xff);
684 buf[offset++] = ((lun >> 16) & 0xff);
685 buf[offset++] = ((lun >> 8) & 0xff);
686 buf[offset++] = (lun & 0xff);
687 lun_count = 1; 679 lun_count = 1;
688 goto done; 680 goto done;
689 } 681 }
@@ -703,15 +695,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
703 if ((cdb_offset + 8) >= se_cmd->data_length) 695 if ((cdb_offset + 8) >= se_cmd->data_length)
704 continue; 696 continue;
705 697
706 lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun)); 698 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
707 buf[offset++] = ((lun >> 56) & 0xff); 699 offset += 8;
708 buf[offset++] = ((lun >> 48) & 0xff);
709 buf[offset++] = ((lun >> 40) & 0xff);
710 buf[offset++] = ((lun >> 32) & 0xff);
711 buf[offset++] = ((lun >> 24) & 0xff);
712 buf[offset++] = ((lun >> 16) & 0xff);
713 buf[offset++] = ((lun >> 8) & 0xff);
714 buf[offset++] = (lun & 0xff);
715 cdb_offset += 8; 700 cdb_offset += 8;
716 } 701 }
717 spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); 702 spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
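
The target-core hunks above drop the open-coded REPORT LUNS shifting in favour of int_to_scsilun(). Below is a userspace sketch of the resulting 8-byte LUN field, packed two bytes per addressing level with the high byte of each level first (the SAM-2 single-level, peripheral-style format for small LUNs); the exact packing is reproduced from memory of that format, not quoted from the helper, so treat it as an assumption.

/* Assumed packing: two bytes per level, high byte first (SAM-2 peripheral style). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void pack_scsilun(uint64_t lun, uint8_t out[8])
{
        int i;

        memset(out, 0, 8);
        for (i = 0; i < 8; i += 2) {
                out[i] = (lun >> 8) & 0xff;     /* high byte of this level */
                out[i + 1] = lun & 0xff;        /* low byte of this level  */
                lun >>= 16;
        }
}

int main(void)
{
        uint8_t field[8];
        int i;

        pack_scsilun(5, field);                 /* a simple single-level LUN */
        for (i = 0; i < 8; i++)
                printf("%02x ", field[i]);
        printf("\n");                           /* prints: 00 05 00 00 00 00 00 00 */
        return 0;
}
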
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7ff6a35f26ac..331d423fd0e0 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -41,7 +41,7 @@
41#include <scsi/scsi_device.h> 41#include <scsi/scsi_device.h>
42#include <scsi/scsi_cmnd.h> 42#include <scsi/scsi_cmnd.h>
43#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
44#include <scsi/libsas.h> /* For TASK_ATTR_* */ 44#include <scsi/scsi_tcq.h>
45 45
46#include <target/target_core_base.h> 46#include <target/target_core_base.h>
47#include <target/target_core_device.h> 47#include <target/target_core_device.h>
@@ -911,7 +911,7 @@ static int pscsi_do_task(struct se_task *task)
911 * descriptor 911 * descriptor
912 */ 912 */
913 blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req, 913 blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
914 (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ), 914 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
915 pscsi_req_done); 915 pscsi_req_done);
916 916
917 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 917 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4a109835e420..59b8b9c5ad72 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -55,7 +55,8 @@ struct se_tmr_req *core_tmr_alloc_req(
55{ 55{
56 struct se_tmr_req *tmr; 56 struct se_tmr_req *tmr;
57 57
58 tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL); 58 tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
59 GFP_ATOMIC : GFP_KERNEL);
59 if (!(tmr)) { 60 if (!(tmr)) {
60 printk(KERN_ERR "Unable to allocate struct se_tmr_req\n"); 61 printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
61 return ERR_PTR(-ENOMEM); 62 return ERR_PTR(-ENOMEM);
@@ -398,9 +399,9 @@ int core_tmr_lun_reset(
398 printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n"); 399 printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
399 } 400 }
400 401
401 spin_lock(&dev->stats_lock); 402 spin_lock_irq(&dev->stats_lock);
402 dev->num_resets++; 403 dev->num_resets++;
403 spin_unlock(&dev->stats_lock); 404 spin_unlock_irq(&dev->stats_lock);
404 405
405 DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", 406 DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
406 (preempt_and_abort_list) ? "Preempt" : "TMR", 407 (preempt_and_abort_list) ? "Preempt" : "TMR",
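
The core_tmr_alloc_req() hunk above switches to choosing the allocation flags from the calling context. A minimal sketch of that pattern, using a hypothetical cache (only the GFP_ATOMIC/GFP_KERNEL selection is the point), is:

/*
 * Sketch of the context-aware allocation pattern: GFP_ATOMIC when the
 * caller may be in interrupt context (sleeping is not allowed there),
 * GFP_KERNEL otherwise.  The cache name is invented for the example.
 */
#include <linux/hardirq.h>
#include <linux/slab.h>

static void *example_alloc(struct kmem_cache *example_cache)
{
	return kmem_cache_zalloc(example_cache,
				 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}
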
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b9d3501bdd91..4dafeb8b5638 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -42,7 +42,7 @@
42#include <net/tcp.h> 42#include <net/tcp.h>
43#include <scsi/scsi.h> 43#include <scsi/scsi.h>
44#include <scsi/scsi_cmnd.h> 44#include <scsi/scsi_cmnd.h>
45#include <scsi/libsas.h> /* For TASK_ATTR_* */ 45#include <scsi/scsi_tcq.h>
46 46
47#include <target/target_core_base.h> 47#include <target/target_core_base.h>
48#include <target/target_core_device.h> 48#include <target/target_core_device.h>
@@ -762,7 +762,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
762 transport_all_task_dev_remove_state(cmd); 762 transport_all_task_dev_remove_state(cmd);
763 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); 763 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
764 764
765 transport_free_dev_tasks(cmd);
766 765
767check_lun: 766check_lun:
768 spin_lock_irqsave(&lun->lun_cmd_lock, flags); 767 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
@@ -1075,7 +1074,7 @@ static inline int transport_add_task_check_sam_attr(
1075 * head of the struct se_device->execute_task_list, and task_prev 1074 * head of the struct se_device->execute_task_list, and task_prev
1076 * after that for each subsequent task 1075 * after that for each subsequent task
1077 */ 1076 */
1078 if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) { 1077 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
1079 list_add(&task->t_execute_list, 1078 list_add(&task->t_execute_list,
1080 (task_prev != NULL) ? 1079 (task_prev != NULL) ?
1081 &task_prev->t_execute_list : 1080 &task_prev->t_execute_list :
@@ -1195,6 +1194,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
1195 break; 1194 break;
1196 1195
1197 list_del(&task->t_execute_list); 1196 list_del(&task->t_execute_list);
1197 atomic_set(&task->task_execute_queue, 0);
1198 atomic_dec(&dev->execute_tasks); 1198 atomic_dec(&dev->execute_tasks);
1199 1199
1200 return task; 1200 return task;
@@ -1210,8 +1210,14 @@ void transport_remove_task_from_execute_queue(
1210{ 1210{
1211 unsigned long flags; 1211 unsigned long flags;
1212 1212
1213 if (atomic_read(&task->task_execute_queue) == 0) {
1214 dump_stack();
1215 return;
1216 }
1217
1213 spin_lock_irqsave(&dev->execute_task_lock, flags); 1218 spin_lock_irqsave(&dev->execute_task_lock, flags);
1214 list_del(&task->t_execute_list); 1219 list_del(&task->t_execute_list);
1220 atomic_set(&task->task_execute_queue, 0);
1215 atomic_dec(&dev->execute_tasks); 1221 atomic_dec(&dev->execute_tasks);
1216 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 1222 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
1217} 1223}
@@ -1867,7 +1873,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1867 if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) 1873 if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1868 return 0; 1874 return 0;
1869 1875
1870 if (cmd->sam_task_attr == TASK_ATTR_ACA) { 1876 if (cmd->sam_task_attr == MSG_ACA_TAG) {
1871 DEBUG_STA("SAM Task Attribute ACA" 1877 DEBUG_STA("SAM Task Attribute ACA"
1872 " emulation is not supported\n"); 1878 " emulation is not supported\n");
1873 return -1; 1879 return -1;
@@ -2058,6 +2064,13 @@ int transport_generic_handle_tmr(
2058} 2064}
2059EXPORT_SYMBOL(transport_generic_handle_tmr); 2065EXPORT_SYMBOL(transport_generic_handle_tmr);
2060 2066
2067void transport_generic_free_cmd_intr(
2068 struct se_cmd *cmd)
2069{
2070 transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
2071}
2072EXPORT_SYMBOL(transport_generic_free_cmd_intr);
2073
2061static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) 2074static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
2062{ 2075{
2063 struct se_task *task, *task_tmp; 2076 struct se_task *task, *task_tmp;
@@ -2504,7 +2517,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2504 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 2517 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
2505 * to allow the passed struct se_cmd list of tasks to the front of the list. 2518 * to allow the passed struct se_cmd list of tasks to the front of the list.
2506 */ 2519 */
2507 if (cmd->sam_task_attr == TASK_ATTR_HOQ) { 2520 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2508 atomic_inc(&SE_DEV(cmd)->dev_hoq_count); 2521 atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
2509 smp_mb__after_atomic_inc(); 2522 smp_mb__after_atomic_inc();
2510 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" 2523 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
@@ -2512,7 +2525,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2512 T_TASK(cmd)->t_task_cdb[0], 2525 T_TASK(cmd)->t_task_cdb[0],
2513 cmd->se_ordered_id); 2526 cmd->se_ordered_id);
2514 return 1; 2527 return 1;
2515 } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { 2528 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2516 spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); 2529 spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
2517 list_add_tail(&cmd->se_ordered_list, 2530 list_add_tail(&cmd->se_ordered_list,
2518 &SE_DEV(cmd)->ordered_cmd_list); 2531 &SE_DEV(cmd)->ordered_cmd_list);
@@ -3411,7 +3424,7 @@ static int transport_generic_cmd_sequencer(
3411 * See spc4r17 section 5.3 3424 * See spc4r17 section 5.3
3412 */ 3425 */
3413 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3426 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3414 cmd->sam_task_attr = TASK_ATTR_HOQ; 3427 cmd->sam_task_attr = MSG_HEAD_TAG;
3415 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; 3428 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3416 break; 3429 break;
3417 case READ_BUFFER: 3430 case READ_BUFFER:
@@ -3619,7 +3632,7 @@ static int transport_generic_cmd_sequencer(
3619 * See spc4r17 section 5.3 3632 * See spc4r17 section 5.3
3620 */ 3633 */
3621 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3634 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3622 cmd->sam_task_attr = TASK_ATTR_HOQ; 3635 cmd->sam_task_attr = MSG_HEAD_TAG;
3623 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; 3636 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3624 break; 3637 break;
3625 default: 3638 default:
@@ -3777,21 +3790,21 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3777 struct se_cmd *cmd_p, *cmd_tmp; 3790 struct se_cmd *cmd_p, *cmd_tmp;
3778 int new_active_tasks = 0; 3791 int new_active_tasks = 0;
3779 3792
3780 if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) { 3793 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3781 atomic_dec(&dev->simple_cmds); 3794 atomic_dec(&dev->simple_cmds);
3782 smp_mb__after_atomic_dec(); 3795 smp_mb__after_atomic_dec();
3783 dev->dev_cur_ordered_id++; 3796 dev->dev_cur_ordered_id++;
3784 DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" 3797 DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
3785 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 3798 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3786 cmd->se_ordered_id); 3799 cmd->se_ordered_id);
3787 } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) { 3800 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3788 atomic_dec(&dev->dev_hoq_count); 3801 atomic_dec(&dev->dev_hoq_count);
3789 smp_mb__after_atomic_dec(); 3802 smp_mb__after_atomic_dec();
3790 dev->dev_cur_ordered_id++; 3803 dev->dev_cur_ordered_id++;
3791 DEBUG_STA("Incremented dev_cur_ordered_id: %u for" 3804 DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
3792 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 3805 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3793 cmd->se_ordered_id); 3806 cmd->se_ordered_id);
3794 } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { 3807 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3795 spin_lock(&dev->ordered_cmd_lock); 3808 spin_lock(&dev->ordered_cmd_lock);
3796 list_del(&cmd->se_ordered_list); 3809 list_del(&cmd->se_ordered_list);
3797 atomic_dec(&dev->dev_ordered_sync); 3810 atomic_dec(&dev->dev_ordered_sync);
@@ -3824,7 +3837,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3824 new_active_tasks++; 3837 new_active_tasks++;
3825 3838
3826 spin_lock(&dev->delayed_cmd_lock); 3839 spin_lock(&dev->delayed_cmd_lock);
3827 if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED) 3840 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3828 break; 3841 break;
3829 } 3842 }
3830 spin_unlock(&dev->delayed_cmd_lock); 3843 spin_unlock(&dev->delayed_cmd_lock);
@@ -4776,18 +4789,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4776 sg_end_cur->page_link &= ~0x02; 4789 sg_end_cur->page_link &= ~0x02;
4777 4790
4778 sg_chain(sg_head, task_sg_num, sg_head_cur); 4791 sg_chain(sg_head, task_sg_num, sg_head_cur);
4779 sg_count += (task->task_sg_num + 1);
4780 } else
4781 sg_count += task->task_sg_num; 4792 sg_count += task->task_sg_num;
4793 task_sg_num = (task->task_sg_num + 1);
4794 } else {
4795 sg_chain(sg_head, task_sg_num, sg_head_cur);
4796 sg_count += task->task_sg_num;
4797 task_sg_num = task->task_sg_num;
4798 }
4782 4799
4783 sg_head = sg_head_cur; 4800 sg_head = sg_head_cur;
4784 sg_link = sg_link_cur; 4801 sg_link = sg_link_cur;
4785 task_sg_num = task->task_sg_num;
4786 continue; 4802 continue;
4787 } 4803 }
4788 sg_head = sg_first = &task->task_sg[0]; 4804 sg_head = sg_first = &task->task_sg[0];
4789 sg_link = &task->task_sg[task->task_sg_num]; 4805 sg_link = &task->task_sg[task->task_sg_num];
4790 task_sg_num = task->task_sg_num;
4791 /* 4806 /*
4792 * Check for single task.. 4807 * Check for single task..
4793 */ 4808 */
@@ -4798,9 +4813,12 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4798 */ 4813 */
4799 sg_end = &task->task_sg[task->task_sg_num - 1]; 4814 sg_end = &task->task_sg[task->task_sg_num - 1];
4800 sg_end->page_link &= ~0x02; 4815 sg_end->page_link &= ~0x02;
4801 sg_count += (task->task_sg_num + 1);
4802 } else
4803 sg_count += task->task_sg_num; 4816 sg_count += task->task_sg_num;
4817 task_sg_num = (task->task_sg_num + 1);
4818 } else {
4819 sg_count += task->task_sg_num;
4820 task_sg_num = task->task_sg_num;
4821 }
4804 } 4822 }
4805 /* 4823 /*
4806 * Setup the starting pointer and total t_tasks_sg_linked_no including 4824 * Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4809,21 +4827,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4809 T_TASK(cmd)->t_tasks_sg_chained = sg_first; 4827 T_TASK(cmd)->t_tasks_sg_chained = sg_first;
4810 T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; 4828 T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
4811 4829
4812 DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and" 4830 DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
4813 " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained, 4831 " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
4814 T_TASK(cmd)->t_tasks_sg_chained_no); 4832 T_TASK(cmd)->t_tasks_sg_chained_no);
4815 4833
4816 for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, 4834 for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
4817 T_TASK(cmd)->t_tasks_sg_chained_no, i) { 4835 T_TASK(cmd)->t_tasks_sg_chained_no, i) {
4818 4836
4819 DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n", 4837 DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
4820 sg, sg_page(sg), sg->length, sg->offset); 4838 i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
4821 if (sg_is_chain(sg)) 4839 if (sg_is_chain(sg))
4822 DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); 4840 DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
4823 if (sg_is_last(sg)) 4841 if (sg_is_last(sg))
4824 DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); 4842 DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
4825 } 4843 }
4826
4827} 4844}
4828EXPORT_SYMBOL(transport_do_task_sg_chain); 4845EXPORT_SYMBOL(transport_do_task_sg_chain);
4829 4846
@@ -5297,6 +5314,8 @@ void transport_generic_free_cmd(
5297 if (wait_for_tasks && cmd->transport_wait_for_tasks) 5314 if (wait_for_tasks && cmd->transport_wait_for_tasks)
5298 cmd->transport_wait_for_tasks(cmd, 0, 0); 5315 cmd->transport_wait_for_tasks(cmd, 0, 0);
5299 5316
5317 transport_free_dev_tasks(cmd);
5318
5300 transport_generic_remove(cmd, release_to_pool, 5319 transport_generic_remove(cmd, release_to_pool,
5301 session_reinstatement); 5320 session_reinstatement);
5302 } 5321 }
@@ -6132,6 +6151,9 @@ get_cmd:
6132 case TRANSPORT_REMOVE: 6151 case TRANSPORT_REMOVE:
6133 transport_generic_remove(cmd, 1, 0); 6152 transport_generic_remove(cmd, 1, 0);
6134 break; 6153 break;
6154 case TRANSPORT_FREE_CMD_INTR:
6155 transport_generic_free_cmd(cmd, 0, 1, 0);
6156 break;
6135 case TRANSPORT_PROCESS_TMR: 6157 case TRANSPORT_PROCESS_TMR:
6136 transport_generic_do_tmr(cmd); 6158 transport_generic_do_tmr(cmd);
6137 break; 6159 break;
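
Earlier in this file's diff, transport_do_task_sg_chain() reworks how the per-task scatterlists are chained and counted. A minimal sketch of the underlying scatterlist API, assuming the first table was allocated with a spare trailing slot reserved for the chain entry (as the per-task task_sg arrays here are), is:

/*
 * Minimal sketch of scatterlist chaining.  first[first_nents - 1] is
 * sacrificed as the chain link, so first must have been allocated with
 * that extra slot.
 */
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void chain_and_walk(struct scatterlist *first, unsigned int first_nents,
			   struct scatterlist *second, unsigned int second_nents)
{
	struct scatterlist *sg;
	int i;

	/* turn the last entry of 'first' into a chain pointing at 'second' */
	sg_chain(first, first_nents, second);

	/* sg_next() follows the chain transparently, so for_each_sg() walks
	 * (first_nents - 1) + second_nents real entries as one list */
	for_each_sg(first, sg, first_nents - 1 + second_nents, i)
		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
			 i, sg, sg_page(sg), sg->length, sg->offset);
}

This is also why the reworked accounting above adds one to task_sg_num on the chained path: the chain slot occupies an entry in the table even though it is skipped during the walk.
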
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 49e51778f733..c056a1132ae1 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -35,6 +35,7 @@
35#include <scsi/scsi_host.h> 35#include <scsi/scsi_host.h>
36#include <scsi/scsi_device.h> 36#include <scsi/scsi_device.h>
37#include <scsi/scsi_cmnd.h> 37#include <scsi/scsi_cmnd.h>
38#include <scsi/scsi_tcq.h>
38#include <scsi/libfc.h> 39#include <scsi/libfc.h>
39#include <scsi/fc_encode.h> 40#include <scsi/fc_encode.h>
40 41
@@ -592,8 +593,25 @@ static void ft_send_cmd(struct ft_cmd *cmd)
592 case FCP_CFL_WRDATA | FCP_CFL_RDDATA: 593 case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
593 goto err; /* TBD not supported by tcm_fc yet */ 594 goto err; /* TBD not supported by tcm_fc yet */
594 } 595 }
596 /*
597 * Locate the SAM Task Attr from fc_pri_ta
598 */
599 switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
600 case FCP_PTA_HEADQ:
601 task_attr = MSG_HEAD_TAG;
602 break;
603 case FCP_PTA_ORDERED:
604 task_attr = MSG_ORDERED_TAG;
605 break;
606 case FCP_PTA_ACA:
607 task_attr = MSG_ACA_TAG;
608 break;
609 case FCP_PTA_SIMPLE: /* Fallthrough */
610 default:
611 task_attr = MSG_SIMPLE_TAG;
612 }
613
595 614
596 /* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
597 task_attr = fcp->fc_pri_ta & FCP_PTA_MASK; 615 task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
598 data_len = ntohl(fcp->fc_dl); 616 data_len = ntohl(fcp->fc_dl);
599 cmd->cdb = fcp->fc_cdb; 617 cmd->cdb = fcp->fc_cdb;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index fcdbbffe88cc..84e868c255dd 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -519,13 +519,6 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
519 return tpg->index; 519 return tpg->index;
520} 520}
521 521
522static u64 ft_pack_lun(unsigned int index)
523{
524 WARN_ON(index >= 256);
525 /* Caller wants this byte-swapped */
526 return cpu_to_le64((index & 0xff) << 8);
527}
528
529static struct target_core_fabric_ops ft_fabric_ops = { 522static struct target_core_fabric_ops ft_fabric_ops = {
530 .get_fabric_name = ft_get_fabric_name, 523 .get_fabric_name = ft_get_fabric_name,
531 .get_fabric_proto_ident = fc_get_fabric_proto_ident, 524 .get_fabric_proto_ident = fc_get_fabric_proto_ident,
@@ -564,7 +557,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
564 .get_fabric_sense_len = ft_get_fabric_sense_len, 557 .get_fabric_sense_len = ft_get_fabric_sense_len,
565 .set_fabric_sense_len = ft_set_fabric_sense_len, 558 .set_fabric_sense_len = ft_set_fabric_sense_len,
566 .is_state_remove = ft_is_state_remove, 559 .is_state_remove = ft_is_state_remove,
567 .pack_lun = ft_pack_lun,
568 /* 560 /*
569 * Setup function pointers for generic logic in 561 * Setup function pointers for generic logic in
570 * target_core_fabric_configfs.c 562 * target_core_fabric_configfs.c
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index bfa05e801823..c0e8f2eeb886 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -4096,8 +4096,7 @@ static int __init cy_init(void)
4096 if (!cy_serial_driver) 4096 if (!cy_serial_driver)
4097 goto err; 4097 goto err;
4098 4098
4099 printk(KERN_INFO "Cyclades driver " CY_VERSION " (built %s %s)\n", 4099 printk(KERN_INFO "Cyclades driver " CY_VERSION "\n");
4100 __DATE__, __TIME__);
4101 4100
4102 /* Initialize the tty_driver structure */ 4101 /* Initialize the tty_driver structure */
4103 4102
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index b1aecc7bb32a..fd347ff34d07 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -61,8 +61,7 @@
61#include <linux/delay.h> 61#include <linux/delay.h>
62 62
63 63
64#define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \ 64#define VERSION_STRING DRIVER_DESC " 2.1d"
65 __DATE__ " " __TIME__ ")"
66 65
67/* Macros definitions */ 66/* Macros definitions */
68 67
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 652bdac8ce8e..6d5d6e679fc7 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1420,7 +1420,7 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
1420 port->flags = UPF_BOOT_AUTOCONF; 1420 port->flags = UPF_BOOT_AUTOCONF;
1421 port->ops = &atmel_pops; 1421 port->ops = &atmel_pops;
1422 port->fifosize = 1; 1422 port->fifosize = 1;
1423 port->line = pdev->id; 1423 port->line = data->num;
1424 port->dev = &pdev->dev; 1424 port->dev = &pdev->dev;
1425 port->mapbase = pdev->resource[0].start; 1425 port->mapbase = pdev->resource[0].start;
1426 port->irq = pdev->resource[1].start; 1426 port->irq = pdev->resource[1].start;
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index bea5c215460c..84db7321cce8 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -907,9 +907,10 @@ static int m32r_sio_request_port(struct uart_port *port)
907 return ret; 907 return ret;
908} 908}
909 909
910static void m32r_sio_config_port(struct uart_port *port, int flags) 910static void m32r_sio_config_port(struct uart_port *port, int unused)
911{ 911{
912 struct uart_sio_port *up = (struct uart_sio_port *)port; 912 struct uart_sio_port *up = (struct uart_sio_port *)port;
913 unsigned long flags;
913 914
914 spin_lock_irqsave(&up->port.lock, flags); 915 spin_lock_irqsave(&up->port.lock, flags);
915 916
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index 3f2e07011a48..cfb5aa72b196 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -100,6 +100,7 @@ struct twl6030_usb {
100 u8 linkstat; 100 u8 linkstat;
101 u8 asleep; 101 u8 asleep;
102 bool irq_enabled; 102 bool irq_enabled;
103 unsigned long features;
103}; 104};
104 105
105#define xceiv_to_twl(x) container_of((x), struct twl6030_usb, otg) 106#define xceiv_to_twl(x) container_of((x), struct twl6030_usb, otg)
@@ -204,6 +205,12 @@ static int twl6030_start_srp(struct otg_transceiver *x)
204 205
205static int twl6030_usb_ldo_init(struct twl6030_usb *twl) 206static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
206{ 207{
208 char *regulator_name;
209
210 if (twl->features & TWL6025_SUBCLASS)
211 regulator_name = "ldousb";
212 else
213 regulator_name = "vusb";
207 214
208 /* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */ 215 /* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */
209 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG); 216 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG);
@@ -214,7 +221,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
214 /* Program MISC2 register and set bit VUSB_IN_VBAT */ 221 /* Program MISC2 register and set bit VUSB_IN_VBAT */
215 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2); 222 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2);
216 223
217 twl->usb3v3 = regulator_get(twl->dev, "vusb"); 224 twl->usb3v3 = regulator_get(twl->dev, regulator_name);
218 if (IS_ERR(twl->usb3v3)) 225 if (IS_ERR(twl->usb3v3))
219 return -ENODEV; 226 return -ENODEV;
220 227
@@ -409,6 +416,7 @@ static int __devinit twl6030_usb_probe(struct platform_device *pdev)
409 twl->dev = &pdev->dev; 416 twl->dev = &pdev->dev;
410 twl->irq1 = platform_get_irq(pdev, 0); 417 twl->irq1 = platform_get_irq(pdev, 0);
411 twl->irq2 = platform_get_irq(pdev, 1); 418 twl->irq2 = platform_get_irq(pdev, 1);
419 twl->features = pdata->features;
412 twl->otg.dev = twl->dev; 420 twl->otg.dev = twl->dev;
413 twl->otg.label = "twl6030"; 421 twl->otg.label = "twl6030";
414 twl->otg.set_host = twl6030_set_host; 422 twl->otg.set_host = twl6030_set_host;
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index ea39336addfb..f70bd63b0187 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/fb.h> 17#include <linux/fb.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/uaccess.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
21#include <linux/pci.h> 22#include <linux/pci.h>
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 7c608c5ccf84..00d615d7aa21 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -42,7 +42,7 @@ config W1_MASTER_MXC
42 42
43config W1_MASTER_DS1WM 43config W1_MASTER_DS1WM
44 tristate "Maxim DS1WM 1-wire busmaster" 44 tristate "Maxim DS1WM 1-wire busmaster"
45 depends on W1 && ARM && HAVE_CLK 45 depends on W1
46 help 46 help
47 Say Y here to enable the DS1WM 1-wire driver, such as that 47 Say Y here to enable the DS1WM 1-wire driver, such as that
48 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like 48 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 0855d6cce3c1..ad57593d224a 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -33,6 +33,7 @@
33#define DS1WM_INT 0x02 /* R/W interrupt status */ 33#define DS1WM_INT 0x02 /* R/W interrupt status */
34#define DS1WM_INT_EN 0x03 /* R/W interrupt enable */ 34#define DS1WM_INT_EN 0x03 /* R/W interrupt enable */
35#define DS1WM_CLKDIV 0x04 /* R/W 5 bits of divisor and pre-scale */ 35#define DS1WM_CLKDIV 0x04 /* R/W 5 bits of divisor and pre-scale */
36#define DS1WM_CNTRL 0x05 /* R/W master control register (not used yet) */
36 37
37#define DS1WM_CMD_1W_RESET (1 << 0) /* force reset on 1-wire bus */ 38#define DS1WM_CMD_1W_RESET (1 << 0) /* force reset on 1-wire bus */
38#define DS1WM_CMD_SRA (1 << 1) /* enable Search ROM accelerator mode */ 39#define DS1WM_CMD_SRA (1 << 1) /* enable Search ROM accelerator mode */
@@ -56,6 +57,7 @@
56#define DS1WM_INTEN_ERSRF (1 << 5) /* enable rx shift register full int */ 57#define DS1WM_INTEN_ERSRF (1 << 5) /* enable rx shift register full int */
57#define DS1WM_INTEN_DQO (1 << 6) /* enable direct bus driving ops */ 58#define DS1WM_INTEN_DQO (1 << 6) /* enable direct bus driving ops */
58 59
60#define DS1WM_INTEN_NOT_IAS (~DS1WM_INTEN_IAS) /* all but INTR active state */
59 61
60#define DS1WM_TIMEOUT (HZ * 5) 62#define DS1WM_TIMEOUT (HZ * 5)
61 63
@@ -63,41 +65,50 @@ static struct {
63 unsigned long freq; 65 unsigned long freq;
64 unsigned long divisor; 66 unsigned long divisor;
65} freq[] = { 67} freq[] = {
66 { 4000000, 0x8 }, 68 { 1000000, 0x80 },
67 { 5000000, 0x2 }, 69 { 2000000, 0x84 },
68 { 6000000, 0x5 }, 70 { 3000000, 0x81 },
69 { 7000000, 0x3 }, 71 { 4000000, 0x88 },
70 { 8000000, 0xc }, 72 { 5000000, 0x82 },
71 { 10000000, 0x6 }, 73 { 6000000, 0x85 },
72 { 12000000, 0x9 }, 74 { 7000000, 0x83 },
73 { 14000000, 0x7 }, 75 { 8000000, 0x8c },
74 { 16000000, 0x10 }, 76 { 10000000, 0x86 },
75 { 20000000, 0xa }, 77 { 12000000, 0x89 },
76 { 24000000, 0xd }, 78 { 14000000, 0x87 },
77 { 28000000, 0xb }, 79 { 16000000, 0x90 },
78 { 32000000, 0x14 }, 80 { 20000000, 0x8a },
79 { 40000000, 0xe }, 81 { 24000000, 0x8d },
80 { 48000000, 0x11 }, 82 { 28000000, 0x8b },
81 { 56000000, 0xf }, 83 { 32000000, 0x94 },
82 { 64000000, 0x18 }, 84 { 40000000, 0x8e },
83 { 80000000, 0x12 }, 85 { 48000000, 0x91 },
84 { 96000000, 0x15 }, 86 { 56000000, 0x8f },
85 { 112000000, 0x13 }, 87 { 64000000, 0x98 },
86 { 128000000, 0x1c }, 88 { 80000000, 0x92 },
89 { 96000000, 0x95 },
90 { 112000000, 0x93 },
91 { 128000000, 0x9c },
92/* you can continue this table, consult the OPERATION - CLOCK DIVISOR
93 section of the ds1wm spec sheet. */
87}; 94};
88 95
89struct ds1wm_data { 96struct ds1wm_data {
90 void __iomem *map; 97 void __iomem *map;
91 int bus_shift; /* # of shifts to calc register offsets */ 98 int bus_shift; /* # of shifts to calc register offsets */
92 struct platform_device *pdev; 99 struct platform_device *pdev;
93 const struct mfd_cell *cell; 100 const struct mfd_cell *cell;
94 int irq; 101 int irq;
95 int active_high; 102 int slave_present;
96 int slave_present; 103 void *reset_complete;
97 void *reset_complete; 104 void *read_complete;
98 void *read_complete; 105 void *write_complete;
99 void *write_complete; 106 int read_error;
100 u8 read_byte; /* last byte received */ 107 /* last byte received */
108 u8 read_byte;
109 /* INT_EN value that disables all interrupts, */
110 /* taking the active state (IAS) into account (optimization) */
111 u8 int_en_reg_none;
101}; 112};
102 113
103static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, 114static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
@@ -115,23 +126,39 @@ static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg)
115static irqreturn_t ds1wm_isr(int isr, void *data) 126static irqreturn_t ds1wm_isr(int isr, void *data)
116{ 127{
117 struct ds1wm_data *ds1wm_data = data; 128 struct ds1wm_data *ds1wm_data = data;
118 u8 intr = ds1wm_read_register(ds1wm_data, DS1WM_INT); 129 u8 intr;
130 u8 inten = ds1wm_read_register(ds1wm_data, DS1WM_INT_EN);
131 /* if no bits are set in int enable register (except the IAS)
132 than go no further, reading the regs below has side effects */
133 if (!(inten & DS1WM_INTEN_NOT_IAS))
134 return IRQ_NONE;
119 135
120 ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 0 : 1; 136 ds1wm_write_register(ds1wm_data,
137 DS1WM_INT_EN, ds1wm_data->int_en_reg_none);
121 138
122 if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) 139 /* this read action clears the INTR and certain flags in ds1wm */
123 complete(ds1wm_data->reset_complete); 140 intr = ds1wm_read_register(ds1wm_data, DS1WM_INT);
124 141
125 if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) 142 ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 0 : 1;
126 complete(ds1wm_data->write_complete);
127 143
144 if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) {
145 inten &= ~DS1WM_INTEN_ETMT;
146 complete(ds1wm_data->write_complete);
147 }
128 if (intr & DS1WM_INT_RBF) { 148 if (intr & DS1WM_INT_RBF) {
149 /* this read clears the RBF flag */
129 ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data, 150 ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data,
130 DS1WM_DATA); 151 DS1WM_DATA);
152 inten &= ~DS1WM_INTEN_ERBF;
131 if (ds1wm_data->read_complete) 153 if (ds1wm_data->read_complete)
132 complete(ds1wm_data->read_complete); 154 complete(ds1wm_data->read_complete);
133 } 155 }
156 if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) {
157 inten &= ~DS1WM_INTEN_EPD;
158 complete(ds1wm_data->reset_complete);
159 }
134 160
161 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, inten);
135 return IRQ_HANDLED; 162 return IRQ_HANDLED;
136} 163}
137 164
@@ -142,33 +169,19 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
142 169
143 ds1wm_data->reset_complete = &reset_done; 170 ds1wm_data->reset_complete = &reset_done;
144 171
172 /* enable Presence detect only */
145 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD | 173 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD |
146 (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); 174 ds1wm_data->int_en_reg_none);
147 175
148 ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET); 176 ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET);
149 177
150 timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); 178 timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT);
151 ds1wm_data->reset_complete = NULL; 179 ds1wm_data->reset_complete = NULL;
152 if (!timeleft) { 180 if (!timeleft) {
153 dev_err(&ds1wm_data->pdev->dev, "reset failed\n"); 181 dev_err(&ds1wm_data->pdev->dev, "reset failed, timed out\n");
154 return 1; 182 return 1;
155 } 183 }
156 184
157 /* Wait for the end of the reset. According to the specs, the time
158 * from when the interrupt is asserted to the end of the reset is:
159 * tRSTH - tPDH - tPDL - tPDI
160 * 625 us - 60 us - 240 us - 100 ns = 324.9 us
161 *
162 * We'll wait a bit longer just to be sure.
163 * Was udelay(500), but if it is going to busywait the cpu that long,
164 * might as well come back later.
165 */
166 msleep(1);
167
168 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
169 DS1WM_INTEN_ERBF | DS1WM_INTEN_ETMT | DS1WM_INTEN_EPD |
170 (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0));
171
172 if (!ds1wm_data->slave_present) { 185 if (!ds1wm_data->slave_present) {
173 dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); 186 dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n");
174 return 1; 187 return 1;
@@ -179,26 +192,47 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
179 192
180static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) 193static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data)
181{ 194{
195 unsigned long timeleft;
182 DECLARE_COMPLETION_ONSTACK(write_done); 196 DECLARE_COMPLETION_ONSTACK(write_done);
183 ds1wm_data->write_complete = &write_done; 197 ds1wm_data->write_complete = &write_done;
184 198
199 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
200 ds1wm_data->int_en_reg_none | DS1WM_INTEN_ETMT);
201
185 ds1wm_write_register(ds1wm_data, DS1WM_DATA, data); 202 ds1wm_write_register(ds1wm_data, DS1WM_DATA, data);
186 203
187 wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT); 204 timeleft = wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT);
205
188 ds1wm_data->write_complete = NULL; 206 ds1wm_data->write_complete = NULL;
207 if (!timeleft) {
208 dev_err(&ds1wm_data->pdev->dev, "write failed, timed out\n");
209 return -ETIMEDOUT;
210 }
189 211
190 return 0; 212 return 0;
191} 213}
192 214
193static int ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data) 215static u8 ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data)
194{ 216{
217 unsigned long timeleft;
218 u8 intEnable = DS1WM_INTEN_ERBF | ds1wm_data->int_en_reg_none;
195 DECLARE_COMPLETION_ONSTACK(read_done); 219 DECLARE_COMPLETION_ONSTACK(read_done);
220
221 ds1wm_read_register(ds1wm_data, DS1WM_DATA);
222
196 ds1wm_data->read_complete = &read_done; 223 ds1wm_data->read_complete = &read_done;
224 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, intEnable);
197 225
198 ds1wm_write(ds1wm_data, write_data); 226 ds1wm_write_register(ds1wm_data, DS1WM_DATA, write_data);
199 wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT); 227 timeleft = wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT);
200 ds1wm_data->read_complete = NULL;
201 228
229 ds1wm_data->read_complete = NULL;
230 if (!timeleft) {
231 dev_err(&ds1wm_data->pdev->dev, "read failed, timed out\n");
232 ds1wm_data->read_error = -ETIMEDOUT;
233 return 0xFF;
234 }
235 ds1wm_data->read_error = 0;
202 return ds1wm_data->read_byte; 236 return ds1wm_data->read_byte;
203} 237}
204 238
@@ -206,8 +240,8 @@ static int ds1wm_find_divisor(int gclk)
206{ 240{
207 int i; 241 int i;
208 242
209 for (i = 0; i < ARRAY_SIZE(freq); i++) 243 for (i = ARRAY_SIZE(freq)-1; i >= 0; --i)
210 if (gclk <= freq[i].freq) 244 if (gclk >= freq[i].freq)
211 return freq[i].divisor; 245 return freq[i].divisor;
212 246
213 return 0; 247 return 0;
@@ -222,6 +256,8 @@ static void ds1wm_up(struct ds1wm_data *ds1wm_data)
222 ds1wm_data->cell->enable(ds1wm_data->pdev); 256 ds1wm_data->cell->enable(ds1wm_data->pdev);
223 257
224 divisor = ds1wm_find_divisor(plat->clock_rate); 258 divisor = ds1wm_find_divisor(plat->clock_rate);
259 dev_dbg(&ds1wm_data->pdev->dev,
260 "found divisor 0x%x for clock %d\n", divisor, plat->clock_rate);
225 if (divisor == 0) { 261 if (divisor == 0) {
226 dev_err(&ds1wm_data->pdev->dev, 262 dev_err(&ds1wm_data->pdev->dev,
227 "no suitable divisor for %dHz clock\n", 263 "no suitable divisor for %dHz clock\n",
@@ -242,7 +278,7 @@ static void ds1wm_down(struct ds1wm_data *ds1wm_data)
242 278
243 /* Disable interrupts. */ 279 /* Disable interrupts. */
244 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, 280 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
245 ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0); 281 ds1wm_data->int_en_reg_none);
246 282
247 if (ds1wm_data->cell->disable) 283 if (ds1wm_data->cell->disable)
248 ds1wm_data->cell->disable(ds1wm_data->pdev); 284 ds1wm_data->cell->disable(ds1wm_data->pdev);
@@ -279,41 +315,121 @@ static void ds1wm_search(void *data, struct w1_master *master_dev,
279{ 315{
280 struct ds1wm_data *ds1wm_data = data; 316 struct ds1wm_data *ds1wm_data = data;
281 int i; 317 int i;
282 unsigned long long rom_id; 318 int ms_discrep_bit = -1;
283 319 u64 r = 0; /* holds the progress of the search */
284 /* XXX We need to iterate for multiple devices per the DS1WM docs. 320 u64 r_prime, d;
285 * See http://www.maxim-ic.com/appnotes.cfm/appnote_number/120. */ 321 unsigned slaves_found = 0;
286 if (ds1wm_reset(ds1wm_data)) 322 unsigned int pass = 0;
287 return; 323
288 324 dev_dbg(&ds1wm_data->pdev->dev, "search begin\n");
289 ds1wm_write(ds1wm_data, search_type); 325 while (true) {
290 ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA); 326 ++pass;
291 327 if (pass > 100) {
292 for (rom_id = 0, i = 0; i < 16; i++) { 328 dev_dbg(&ds1wm_data->pdev->dev,
293 329 "too many attempts (100), search aborted\n");
294 unsigned char resp, r, d; 330 return;
295 331 }
296 resp = ds1wm_read(ds1wm_data, 0x00); 332
297 333 if (ds1wm_reset(ds1wm_data)) {
298 r = ((resp & 0x02) >> 1) | 334 dev_dbg(&ds1wm_data->pdev->dev,
299 ((resp & 0x08) >> 2) | 335 "pass: %d reset error (or no slaves)\n", pass);
300 ((resp & 0x20) >> 3) | 336 break;
301 ((resp & 0x80) >> 4); 337 }
302 338
303 d = ((resp & 0x01) >> 0) | 339 dev_dbg(&ds1wm_data->pdev->dev,
304 ((resp & 0x04) >> 1) | 340 "pass: %d r : %0#18llx writing SEARCH_ROM\n", pass, r);
305 ((resp & 0x10) >> 2) | 341 ds1wm_write(ds1wm_data, search_type);
306 ((resp & 0x40) >> 3); 342 dev_dbg(&ds1wm_data->pdev->dev,
307 343 "pass: %d entering ASM\n", pass);
308 rom_id |= (unsigned long long) r << (i * 4); 344 ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA);
309 345 dev_dbg(&ds1wm_data->pdev->dev,
310 } 346 "pass: %d beginning nibble loop\n", pass);
311 dev_dbg(&ds1wm_data->pdev->dev, "found 0x%08llX\n", rom_id); 347
312 348 r_prime = 0;
313 ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA); 349 d = 0;
314 ds1wm_reset(ds1wm_data); 350 /* we work one nibble at a time */
315 351 /* each nibble is interleaved to form a byte */
316 slave_found(master_dev, rom_id); 352 for (i = 0; i < 16; i++) {
353
354 unsigned char resp, _r, _r_prime, _d;
355
356 _r = (r >> (4*i)) & 0xf;
357 _r = ((_r & 0x1) << 1) |
358 ((_r & 0x2) << 2) |
359 ((_r & 0x4) << 3) |
360 ((_r & 0x8) << 4);
361
362 /* writes _r, then reads back: */
363 resp = ds1wm_read(ds1wm_data, _r);
364
365 if (ds1wm_data->read_error) {
366 dev_err(&ds1wm_data->pdev->dev,
367 "pass: %d nibble: %d read error\n", pass, i);
368 break;
369 }
370
371 _r_prime = ((resp & 0x02) >> 1) |
372 ((resp & 0x08) >> 2) |
373 ((resp & 0x20) >> 3) |
374 ((resp & 0x80) >> 4);
375
376 _d = ((resp & 0x01) >> 0) |
377 ((resp & 0x04) >> 1) |
378 ((resp & 0x10) >> 2) |
379 ((resp & 0x40) >> 3);
380
381 r_prime |= (unsigned long long) _r_prime << (i * 4);
382 d |= (unsigned long long) _d << (i * 4);
383
384 }
385 if (ds1wm_data->read_error) {
386 dev_err(&ds1wm_data->pdev->dev,
387 "pass: %d read error, retrying\n", pass);
388 break;
389 }
390 dev_dbg(&ds1wm_data->pdev->dev,
391 "pass: %d r\': %0#18llx d:%0#18llx\n",
392 pass, r_prime, d);
393 dev_dbg(&ds1wm_data->pdev->dev,
394 "pass: %d nibble loop complete, exiting ASM\n", pass);
395 ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA);
396 dev_dbg(&ds1wm_data->pdev->dev,
397 "pass: %d resetting bus\n", pass);
398 ds1wm_reset(ds1wm_data);
399 if ((r_prime & ((u64)1 << 63)) && (d & ((u64)1 << 63))) {
400 dev_err(&ds1wm_data->pdev->dev,
401 "pass: %d bus error, retrying\n", pass);
402 continue; /* start over */
403 }
404
405
406 dev_dbg(&ds1wm_data->pdev->dev,
407 "pass: %d found %0#18llx\n", pass, r_prime);
408 slave_found(master_dev, r_prime);
409 ++slaves_found;
410 dev_dbg(&ds1wm_data->pdev->dev,
411 "pass: %d complete, preparing next pass\n", pass);
412
413 /* any discrepancy for which we already chose the
414 '1' branch is now irrelevant; reveal the
415 next branch with this: */
416 d &= ~r;
417 /* find the last bit set, i.e. the most significant discrepancy bit */
418 ms_discrep_bit = fls64(d) - 1;
419 dev_dbg(&ds1wm_data->pdev->dev,
420 "pass: %d new d:%0#18llx MS discrep bit:%d\n",
421 pass, d, ms_discrep_bit);
422
423 /* prev_ms_discrep_bit = ms_discrep_bit;
424 prepare for next ROM search: */
425 if (ms_discrep_bit == -1)
426 break;
427
428 r = (r & ~(~0ull << ms_discrep_bit)) | (1ull << ms_discrep_bit);
429 } /* end while true */
430 dev_dbg(&ds1wm_data->pdev->dev,
431 "pass: %d total: %d search done ms d bit pos: %d\n", pass,
432 slaves_found, ms_discrep_bit);
317} 433}
318 434
319/* --------------------------------------------------------------------- */ 435/* --------------------------------------------------------------------- */
@@ -373,15 +489,15 @@ static int ds1wm_probe(struct platform_device *pdev)
373 goto err1; 489 goto err1;
374 } 490 }
375 ds1wm_data->irq = res->start; 491 ds1wm_data->irq = res->start;
376 ds1wm_data->active_high = plat->active_high; 492 ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
377 493
378 if (res->flags & IORESOURCE_IRQ_HIGHEDGE) 494 if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
379 irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); 495 irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
380 if (res->flags & IORESOURCE_IRQ_LOWEDGE) 496 if (res->flags & IORESOURCE_IRQ_LOWEDGE)
381 irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); 497 irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
382 498
383 ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, 499 ret = request_irq(ds1wm_data->irq, ds1wm_isr,
384 "ds1wm", ds1wm_data); 500 IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data);
385 if (ret) 501 if (ret)
386 goto err1; 502 goto err1;
387 503
@@ -468,5 +584,6 @@ module_exit(ds1wm_exit);
468 584
469MODULE_LICENSE("GPL"); 585MODULE_LICENSE("GPL");
470MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, " 586MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, "
471 "Matt Reimer <mreimer@vpop.net>"); 587 "Matt Reimer <mreimer@vpop.net>,"
588 "Jean-Francois Dagenais <dagenaisj@sonatest.com>");
472MODULE_DESCRIPTION("DS1WM w1 busmaster driver"); 589MODULE_DESCRIPTION("DS1WM w1 busmaster driver");
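
The reworked ds1wm_search() above follows the usual 1-Wire Search ROM branching: after each pass it drops discrepancies already resolved to '1', locates the most significant remaining discrepancy, and rebuilds the path for the next pass. A user-space sketch of just that bookkeeping step (the names below are illustrative, not the driver's; fls64_sketch() stands in for the kernel's fls64()) is:

/*
 * Sketch of the next-path bookkeeping at the end of each search pass:
 * 'r' is the path taken so far, 'd' the discrepancy map reported by
 * the search accelerator.
 */
#include <stdint.h>

static int fls64_sketch(uint64_t x)
{
	int pos = 0;

	while (x) {
		pos++;
		x >>= 1;
	}
	return pos;		/* 0 for x == 0, otherwise 1-based bit index */
}

/* Returns 1 and updates *r if another pass is needed, 0 when done. */
static int next_search_path(uint64_t *r, uint64_t d)
{
	int ms_discrep_bit;

	d &= ~*r;			/* branches already taken as '1' are resolved */
	ms_discrep_bit = fls64_sketch(d) - 1;
	if (ms_discrep_bit < 0)
		return 0;		/* no unresolved discrepancy left */

	/* keep the low bits of the old path, force the discrepancy bit
	 * to '1', and let everything above it be rediscovered next pass */
	*r = (*r & ~(~0ULL << ms_discrep_bit)) | (1ULL << ms_discrep_bit);
	return 1;
}
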
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index f0c909625bd1..d0cb01b42012 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -16,6 +16,13 @@ config W1_SLAVE_SMEM
16 Say Y here if you want to connect 1-wire 16 Say Y here if you want to connect 1-wire
17 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire. 17 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
18 18
19config W1_SLAVE_DS2408
20 tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
21 help
22 Say Y here if you want support for the 1-wire
23 DS2408 8-Channel Addressable Switch
24 (IO expander) device.
25
19config W1_SLAVE_DS2423 26config W1_SLAVE_DS2423
20 tristate "Counter 1-wire device (DS2423)" 27 tristate "Counter 1-wire device (DS2423)"
21 select CRC16 28 select CRC16
@@ -61,6 +68,19 @@ config W1_SLAVE_DS2760
61 68
62 If you are unsure, say N. 69 If you are unsure, say N.
63 70
71config W1_SLAVE_DS2780
72 tristate "Dallas 2780 battery monitor chip"
73 depends on W1
74 help
75 Say Y here to enable support for the DS2780 battery
76 monitor chip.
77
78 The battery monitor chip is used in many batteries/devices
79 as the part responsible for charging, discharging, and
80 monitoring Li+ batteries.
81
82 If you are unsure, say N.
83
64config W1_SLAVE_BQ27000 84config W1_SLAVE_BQ27000
65 tristate "BQ27000 slave support" 85 tristate "BQ27000 slave support"
66 depends on W1 86 depends on W1
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 3c76350a24f7..1f31e9fb0b25 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,8 +4,10 @@
4 4
5obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o 5obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o 6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
7obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
7obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o 8obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
8obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o 9obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
9obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o 10obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
10obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o 11obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
12obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
11obj-$(CONFIG_W1_SLAVE_BQ27000) += w1_bq27000.o 13obj-$(CONFIG_W1_SLAVE_BQ27000) += w1_bq27000.o
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
new file mode 100644
index 000000000000..c37781899d90
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -0,0 +1,402 @@
1/*
2 * w1_ds2408.c - w1 family 29 (DS2408) driver
3 *
4 * Copyright (c) 2010 Jean-Francois Dagenais <dagenaisj@sonatest.com>
5 *
6 * This source code is licensed under the GNU General Public License,
7 * Version 2. See the file COPYING for more details.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/device.h>
14#include <linux/types.h>
15#include <linux/delay.h>
16#include <linux/slab.h>
17
18#include "../w1.h"
19#include "../w1_int.h"
20#include "../w1_family.h"
21
22MODULE_LICENSE("GPL");
23MODULE_AUTHOR("Jean-Francois Dagenais <dagenaisj@sonatest.com>");
24MODULE_DESCRIPTION("w1 family 29 driver for DS2408 8 Pin IO");
25
26
27#define W1_F29_RETRIES 3
28
29#define W1_F29_REG_LOGIG_STATE 0x88 /* R */
30#define W1_F29_REG_OUTPUT_LATCH_STATE 0x89 /* R */
31#define W1_F29_REG_ACTIVITY_LATCH_STATE 0x8A /* R */
32#define W1_F29_REG_COND_SEARCH_SELECT_MASK 0x8B /* RW */
33#define W1_F29_REG_COND_SEARCH_POL_SELECT 0x8C /* RW */
34#define W1_F29_REG_CONTROL_AND_STATUS 0x8D /* RW */
35
36#define W1_F29_FUNC_READ_PIO_REGS 0xF0
37#define W1_F29_FUNC_CHANN_ACCESS_READ 0xF5
38#define W1_F29_FUNC_CHANN_ACCESS_WRITE 0x5A
39/* also used to write the control/status reg (0x8D): */
40#define W1_F29_FUNC_WRITE_COND_SEARCH_REG 0xCC
41#define W1_F29_FUNC_RESET_ACTIVITY_LATCHES 0xC3
42
43#define W1_F29_SUCCESS_CONFIRM_BYTE 0xAA
44
45static int _read_reg(struct w1_slave *sl, u8 address, unsigned char* buf)
46{
47 u8 wrbuf[3];
48 dev_dbg(&sl->dev,
49 "Reading with slave: %p, reg addr: %0#4x, buff addr: %p",
50 sl, (unsigned int)address, buf);
51
52 if (!buf)
53 return -EINVAL;
54
55 mutex_lock(&sl->master->mutex);
56 dev_dbg(&sl->dev, "mutex locked");
57
58 if (w1_reset_select_slave(sl)) {
59 mutex_unlock(&sl->master->mutex);
60 return -EIO;
61 }
62
63 wrbuf[0] = W1_F29_FUNC_READ_PIO_REGS;
64 wrbuf[1] = address;
65 wrbuf[2] = 0;
66 w1_write_block(sl->master, wrbuf, 3);
67 *buf = w1_read_8(sl->master);
68
69 mutex_unlock(&sl->master->mutex);
70 dev_dbg(&sl->dev, "mutex unlocked");
71 return 1;
72}
73
74static ssize_t w1_f29_read_state(
75 struct file *filp, struct kobject *kobj,
76 struct bin_attribute *bin_attr,
77 char *buf, loff_t off, size_t count)
78{
79 dev_dbg(&kobj_to_w1_slave(kobj)->dev,
80 "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
81 bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
82 if (count != 1 || off != 0)
83 return -EFAULT;
84 return _read_reg(kobj_to_w1_slave(kobj), W1_F29_REG_LOGIG_STATE, buf);
85}
86
87static ssize_t w1_f29_read_output(
88 struct file *filp, struct kobject *kobj,
89 struct bin_attribute *bin_attr,
90 char *buf, loff_t off, size_t count)
91{
92 dev_dbg(&kobj_to_w1_slave(kobj)->dev,
93 "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
94 bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
95 if (count != 1 || off != 0)
96 return -EFAULT;
97 return _read_reg(kobj_to_w1_slave(kobj),
98 W1_F29_REG_OUTPUT_LATCH_STATE, buf);
99}
100
101static ssize_t w1_f29_read_activity(
102 struct file *filp, struct kobject *kobj,
103 struct bin_attribute *bin_attr,
104 char *buf, loff_t off, size_t count)
105{
106 dev_dbg(&kobj_to_w1_slave(kobj)->dev,
107 "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
108 bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
109 if (count != 1 || off != 0)
110 return -EFAULT;
111 return _read_reg(kobj_to_w1_slave(kobj),
112 W1_F29_REG_ACTIVITY_LATCH_STATE, buf);
113}
114
115static ssize_t w1_f29_read_cond_search_mask(
116 struct file *filp, struct kobject *kobj,
117 struct bin_attribute *bin_attr,
118 char *buf, loff_t off, size_t count)
119{
120 dev_dbg(&kobj_to_w1_slave(kobj)->dev,
121 "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
122 bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
123 if (count != 1 || off != 0)
124 return -EFAULT;
125 return _read_reg(kobj_to_w1_slave(kobj),
126 W1_F29_REG_COND_SEARCH_SELECT_MASK, buf);
127}
128
129static ssize_t w1_f29_read_cond_search_polarity(
130 struct file *filp, struct kobject *kobj,
131 struct bin_attribute *bin_attr,
132 char *buf, loff_t off, size_t count)
133{
134 if (count != 1 || off != 0)
135 return -EFAULT;
136 return _read_reg(kobj_to_w1_slave(kobj),
137 W1_F29_REG_COND_SEARCH_POL_SELECT, buf);
138}
139
140static ssize_t w1_f29_read_status_control(
141 struct file *filp, struct kobject *kobj,
142 struct bin_attribute *bin_attr,
143 char *buf, loff_t off, size_t count)
144{
145 if (count != 1 || off != 0)
146 return -EFAULT;
147 return _read_reg(kobj_to_w1_slave(kobj),
148 W1_F29_REG_CONTROL_AND_STATUS, buf);
149}
150
151
152
153
154static ssize_t w1_f29_write_output(
155 struct file *filp, struct kobject *kobj,
156 struct bin_attribute *bin_attr,
157 char *buf, loff_t off, size_t count)
158{
159 struct w1_slave *sl = kobj_to_w1_slave(kobj);
160 u8 w1_buf[3];
161 u8 readBack;
162 unsigned int retries = W1_F29_RETRIES;
163
164 if (count != 1 || off != 0)
165 return -EFAULT;
166
167 dev_dbg(&sl->dev, "locking mutex for write_output");
168 mutex_lock(&sl->master->mutex);
169 dev_dbg(&sl->dev, "mutex locked");
170
171 if (w1_reset_select_slave(sl))
172 goto error;
173
174 while (retries--) {
175 w1_buf[0] = W1_F29_FUNC_CHANN_ACCESS_WRITE;
176 w1_buf[1] = *buf;
177 w1_buf[2] = ~(*buf);
178 w1_write_block(sl->master, w1_buf, 3);
179
180 readBack = w1_read_8(sl->master);
181 /* here the master could read another byte, which
182 would be the PIO register (the actual pin logic state);
183 since this driver does not know which pins are inputs
184 and which are outputs, there is no value in reading the
185 state and comparing it with (*buf), so end the command here: */
186 if (w1_reset_resume_command(sl->master))
187 goto error;
188
189 if (readBack != 0xAA) {
190 /* try again, the slave is ready for a command */
191 continue;
192 }
193
194 /* go read back the output latches */
195 /* (the direct effect of the write above) */
196 w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
197 w1_buf[1] = W1_F29_REG_OUTPUT_LATCH_STATE;
198 w1_buf[2] = 0;
199 w1_write_block(sl->master, w1_buf, 3);
200 /* read the result of the READ_PIO_REGS command */
201 if (w1_read_8(sl->master) == *buf) {
202 /* success! */
203 mutex_unlock(&sl->master->mutex);
204 dev_dbg(&sl->dev,
205 "mutex unlocked, retries:%d", retries);
206 return 1;
207 }
208 }
209error:
210 mutex_unlock(&sl->master->mutex);
211 dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
212
213 return -EIO;
214}
215
216
217/**
218 * Writing to the activity file resets the activity latches.
219 */
220static ssize_t w1_f29_write_activity(
221 struct file *filp, struct kobject *kobj,
222 struct bin_attribute *bin_attr,
223 char *buf, loff_t off, size_t count)
224{
225 struct w1_slave *sl = kobj_to_w1_slave(kobj);
226 unsigned int retries = W1_F29_RETRIES;
227
228 if (count != 1 || off != 0)
229 return -EFAULT;
230
231 mutex_lock(&sl->master->mutex);
232
233 if (w1_reset_select_slave(sl))
234 goto error;
235
236 while (retries--) {
237 w1_write_8(sl->master, W1_F29_FUNC_RESET_ACTIVITY_LATCHES);
238 if (w1_read_8(sl->master) == W1_F29_SUCCESS_CONFIRM_BYTE) {
239 mutex_unlock(&sl->master->mutex);
240 return 1;
241 }
242 if (w1_reset_resume_command(sl->master))
243 goto error;
244 }
245
246error:
247 mutex_unlock(&sl->master->mutex);
248 return -EIO;
249}
250
251static ssize_t w1_f29_write_status_control(
252 struct file *filp,
253 struct kobject *kobj,
254 struct bin_attribute *bin_attr,
255 char *buf,
256 loff_t off,
257 size_t count)
258{
259 struct w1_slave *sl = kobj_to_w1_slave(kobj);
260 u8 w1_buf[4];
261 unsigned int retries = W1_F29_RETRIES;
262
263 if (count != 1 || off != 0)
264 return -EFAULT;
265
266 mutex_lock(&sl->master->mutex);
267
268 if (w1_reset_select_slave(sl))
269 goto error;
270
271 while (retries--) {
272 w1_buf[0] = W1_F29_FUNC_WRITE_COND_SEARCH_REG;
273 w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS;
274 w1_buf[2] = 0;
275 w1_buf[3] = *buf;
276
277 w1_write_block(sl->master, w1_buf, 4);
278 if (w1_reset_resume_command(sl->master))
279 goto error;
280
281 w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
282 w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS;
283 w1_buf[2] = 0;
284
285 w1_write_block(sl->master, w1_buf, 3);
286 if (w1_read_8(sl->master) == *buf) {
287 /* success! */
288 mutex_unlock(&sl->master->mutex);
289 return 1;
290 }
291 }
292error:
293 mutex_unlock(&sl->master->mutex);
294
295 return -EIO;
296}
297
298
299
300#define NB_SYSFS_BIN_FILES 6
301static struct bin_attribute w1_f29_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
302 {
303 .attr = {
304 .name = "state",
305 .mode = S_IRUGO,
306 },
307 .size = 1,
308 .read = w1_f29_read_state,
309 },
310 {
311 .attr = {
312 .name = "output",
313 .mode = S_IRUGO | S_IWUSR | S_IWGRP,
314 },
315 .size = 1,
316 .read = w1_f29_read_output,
317 .write = w1_f29_write_output,
318 },
319 {
320 .attr = {
321 .name = "activity",
322 .mode = S_IRUGO,
323 },
324 .size = 1,
325 .read = w1_f29_read_activity,
326 .write = w1_f29_write_activity,
327 },
328 {
329 .attr = {
330 .name = "cond_search_mask",
331 .mode = S_IRUGO,
332 },
333 .size = 1,
334 .read = w1_f29_read_cond_search_mask,
335 .write = 0,
336 },
337 {
338 .attr = {
339 .name = "cond_search_polarity",
340 .mode = S_IRUGO,
341 },
342 .size = 1,
343 .read = w1_f29_read_cond_search_polarity,
344 .write = 0,
345 },
346 {
347 .attr = {
348 .name = "status_control",
349 .mode = S_IRUGO | S_IWUSR | S_IWGRP,
350 },
351 .size = 1,
352 .read = w1_f29_read_status_control,
353 .write = w1_f29_write_status_control,
354 }
355};
356
357static int w1_f29_add_slave(struct w1_slave *sl)
358{
359 int err = 0;
360 int i;
361
362 for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
363 err = sysfs_create_bin_file(
364 &sl->dev.kobj,
365 &(w1_f29_sysfs_bin_files[i]));
366 if (err)
367 while (--i >= 0)
368 sysfs_remove_bin_file(&sl->dev.kobj,
369 &(w1_f29_sysfs_bin_files[i]));
370 return err;
371}
372
373static void w1_f29_remove_slave(struct w1_slave *sl)
374{
375 int i;
376 for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
377 sysfs_remove_bin_file(&sl->dev.kobj,
378 &(w1_f29_sysfs_bin_files[i]));
379}
380
381static struct w1_family_ops w1_f29_fops = {
382 .add_slave = w1_f29_add_slave,
383 .remove_slave = w1_f29_remove_slave,
384};
385
386static struct w1_family w1_family_29 = {
387 .fid = W1_FAMILY_DS2408,
388 .fops = &w1_f29_fops,
389};
390
391static int __init w1_f29_init(void)
392{
393 return w1_register_family(&w1_family_29);
394}
395
396static void __exit w1_f29_exit(void)
397{
398 w1_unregister_family(&w1_family_29);
399}
400
401module_init(w1_f29_init);
402module_exit(w1_f29_exit);
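
The new w1_ds2408 driver above exposes each DS2408 register as a one-byte sysfs bin file. A hypothetical user-space example follows; the slave id in the path is invented, and the output-latch convention ('1' releases a pin, '0' drives it low) comes from the DS2408 datasheet rather than anything the driver interprets:

/*
 * Hypothetical user-space usage of the sysfs files created by w1_ds2408.
 * Substitute the 29-xxxxxxxxxxxx directory that appears on your system.
 * The driver accepts exactly one byte at offset 0 for reads and writes.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/w1/devices/29-0000001a2b3c/output";
	unsigned char val = 0x0f;	/* release P0..P3, drive P4..P7 low */
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, &val, 1) != 1)
		perror("write output latch");
	if (pread(fd, &val, 1, 0) == 1)	/* offset must stay 0 */
		printf("output latch reads back as 0x%02x\n", val);
	close(fd);
	return 0;
}
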
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
new file mode 100644
index 000000000000..274c8f38303f
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -0,0 +1,217 @@
1/*
2 * 1-Wire implementation for the ds2780 chip
3 *
4 * Copyright (C) 2010 Indesign, LLC
5 *
6 * Author: Clifton Barnes <cabarnes@indesign-llc.com>
7 *
8 * Based on w1-ds2760 driver
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/device.h>
19#include <linux/types.h>
20#include <linux/platform_device.h>
21#include <linux/mutex.h>
22#include <linux/idr.h>
23
24#include "../w1.h"
25#include "../w1_int.h"
26#include "../w1_family.h"
27#include "w1_ds2780.h"
28
29int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
30 int io)
31{
32 struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
33
34 if (!dev)
35 return -ENODEV;
36
37 mutex_lock(&sl->master->mutex);
38
39 if (addr > DS2780_DATA_SIZE || addr < 0) {
40 count = 0;
41 goto out;
42 }
43 count = min_t(int, count, DS2780_DATA_SIZE - addr);
44
45 if (w1_reset_select_slave(sl) == 0) {
46 if (io) {
47 w1_write_8(sl->master, W1_DS2780_WRITE_DATA);
48 w1_write_8(sl->master, addr);
49 w1_write_block(sl->master, buf, count);
50 /* XXX w1_write_block returns void, not n_written */
51 } else {
52 w1_write_8(sl->master, W1_DS2780_READ_DATA);
53 w1_write_8(sl->master, addr);
54 count = w1_read_block(sl->master, buf, count);
55 }
56 }
57
58out:
59 mutex_unlock(&sl->master->mutex);
60
61 return count;
62}
63EXPORT_SYMBOL(w1_ds2780_io);
64
65int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd)
66{
67 struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
68
69 if (!dev)
70 return -EINVAL;
71
72 mutex_lock(&sl->master->mutex);
73
74 if (w1_reset_select_slave(sl) == 0) {
75 w1_write_8(sl->master, cmd);
76 w1_write_8(sl->master, addr);
77 }
78
79 mutex_unlock(&sl->master->mutex);
80 return 0;
81}
82EXPORT_SYMBOL(w1_ds2780_eeprom_cmd);
83
84static ssize_t w1_ds2780_read_bin(struct file *filp,
85 struct kobject *kobj,
86 struct bin_attribute *bin_attr,
87 char *buf, loff_t off, size_t count)
88{
89 struct device *dev = container_of(kobj, struct device, kobj);
90 return w1_ds2780_io(dev, buf, off, count, 0);
91}
92
93static struct bin_attribute w1_ds2780_bin_attr = {
94 .attr = {
95 .name = "w1_slave",
96 .mode = S_IRUGO,
97 },
98 .size = DS2780_DATA_SIZE,
99 .read = w1_ds2780_read_bin,
100};
101
102static DEFINE_IDR(bat_idr);
103static DEFINE_MUTEX(bat_idr_lock);
104
105static int new_bat_id(void)
106{
107 int ret;
108
109 while (1) {
110 int id;
111
112 ret = idr_pre_get(&bat_idr, GFP_KERNEL);
113 if (ret == 0)
114 return -ENOMEM;
115
116 mutex_lock(&bat_idr_lock);
117 ret = idr_get_new(&bat_idr, NULL, &id);
118 mutex_unlock(&bat_idr_lock);
119
120 if (ret == 0) {
121 ret = id & MAX_ID_MASK;
122 break;
123 } else if (ret == -EAGAIN) {
124 continue;
125 } else {
126 break;
127 }
128 }
129
130 return ret;
131}
132
133static void release_bat_id(int id)
134{
135 mutex_lock(&bat_idr_lock);
136 idr_remove(&bat_idr, id);
137 mutex_unlock(&bat_idr_lock);
138}
139
140static int w1_ds2780_add_slave(struct w1_slave *sl)
141{
142 int ret;
143 int id;
144 struct platform_device *pdev;
145
146 id = new_bat_id();
147 if (id < 0) {
148 ret = id;
149 goto noid;
150 }
151
152 pdev = platform_device_alloc("ds2780-battery", id);
153 if (!pdev) {
154 ret = -ENOMEM;
155 goto pdev_alloc_failed;
156 }
157 pdev->dev.parent = &sl->dev;
158
159 ret = platform_device_add(pdev);
160 if (ret)
161 goto pdev_add_failed;
162
163 ret = sysfs_create_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr);
164 if (ret)
165 goto bin_attr_failed;
166
167 dev_set_drvdata(&sl->dev, pdev);
168
169 return 0;
170
171bin_attr_failed:
172pdev_add_failed:
173 platform_device_unregister(pdev);
174pdev_alloc_failed:
175 release_bat_id(id);
176noid:
177 return ret;
178}
179
180static void w1_ds2780_remove_slave(struct w1_slave *sl)
181{
182 struct platform_device *pdev = dev_get_drvdata(&sl->dev);
183 int id = pdev->id;
184
185 platform_device_unregister(pdev);
186 release_bat_id(id);
187 sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr);
188}
189
190static struct w1_family_ops w1_ds2780_fops = {
191 .add_slave = w1_ds2780_add_slave,
192 .remove_slave = w1_ds2780_remove_slave,
193};
194
195static struct w1_family w1_ds2780_family = {
196 .fid = W1_FAMILY_DS2780,
197 .fops = &w1_ds2780_fops,
198};
199
200static int __init w1_ds2780_init(void)
201{
202 idr_init(&bat_idr);
203 return w1_register_family(&w1_ds2780_family);
204}
205
206static void __exit w1_ds2780_exit(void)
207{
208 w1_unregister_family(&w1_ds2780_family);
209 idr_destroy(&bat_idr);
210}
211
212module_init(w1_ds2780_init);
213module_exit(w1_ds2780_exit);
214
215MODULE_LICENSE("GPL");
216MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
217MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC");
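The add_slave path above only registers a "ds2780-battery" platform device and parents it to the w1 slave; the measurement logic is meant to live in a separate platform driver that reaches the chip through the exported w1_ds2780_io() via dev->parent. A minimal sketch of that pattern, assuming a hypothetical battery-driver function and include path (neither is part of this patch):

#include <linux/errno.h>
#include <linux/platform_device.h>
#include "../w1/slaves/w1_ds2780.h"	/* assumed include path from drivers/power */

/* Read RARC, the remaining active relative capacity, reported in percent. */
static int ds2780_battery_get_capacity(struct platform_device *pdev, int *capacity)
{
	struct device *w1_dev = pdev->dev.parent;	/* the w1 slave registered above */
	char raw;
	int ret;

	ret = w1_ds2780_io(w1_dev, &raw, DS2780_RARC_REG, sizeof(raw), 0);
	if (ret != sizeof(raw))
		return ret < 0 ? ret : -EIO;

	*capacity = (u8)raw;
	return 0;
}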
diff --git a/drivers/w1/slaves/w1_ds2780.h b/drivers/w1/slaves/w1_ds2780.h
new file mode 100644
index 000000000000..a1fba79eb1b5
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2780.h
@@ -0,0 +1,129 @@
1/*
2 * 1-Wire implementation for the ds2780 chip
3 *
4 * Copyright (C) 2010 Indesign, LLC
5 *
6 * Author: Clifton Barnes <cabarnes@indesign-llc.com>
7 *
8 * Based on w1-ds2760 driver
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#ifndef _W1_DS2780_H
17#define _W1_DS2780_H
18
19/* Function commands */
20#define W1_DS2780_READ_DATA 0x69
21#define W1_DS2780_WRITE_DATA 0x6C
22#define W1_DS2780_COPY_DATA 0x48
23#define W1_DS2780_RECALL_DATA 0xB8
24#define W1_DS2780_LOCK 0x6A
25
26/* Register map */
27/* Register 0x00 Reserved */
28#define DS2780_STATUS_REG 0x01
29#define DS2780_RAAC_MSB_REG 0x02
30#define DS2780_RAAC_LSB_REG 0x03
31#define DS2780_RSAC_MSB_REG 0x04
32#define DS2780_RSAC_LSB_REG 0x05
33#define DS2780_RARC_REG 0x06
34#define DS2780_RSRC_REG 0x07
35#define DS2780_IAVG_MSB_REG 0x08
36#define DS2780_IAVG_LSB_REG 0x09
37#define DS2780_TEMP_MSB_REG 0x0A
38#define DS2780_TEMP_LSB_REG 0x0B
39#define DS2780_VOLT_MSB_REG 0x0C
40#define DS2780_VOLT_LSB_REG 0x0D
41#define DS2780_CURRENT_MSB_REG 0x0E
42#define DS2780_CURRENT_LSB_REG 0x0F
43#define DS2780_ACR_MSB_REG 0x10
44#define DS2780_ACR_LSB_REG 0x11
45#define DS2780_ACRL_MSB_REG 0x12
46#define DS2780_ACRL_LSB_REG 0x13
47#define DS2780_AS_REG 0x14
48#define DS2780_SFR_REG 0x15
49#define DS2780_FULL_MSB_REG 0x16
50#define DS2780_FULL_LSB_REG 0x17
51#define DS2780_AE_MSB_REG 0x18
52#define DS2780_AE_LSB_REG 0x19
53#define DS2780_SE_MSB_REG 0x1A
54#define DS2780_SE_LSB_REG 0x1B
55/* Register 0x1C - 0x1E Reserved */
56#define DS2780_EEPROM_REG 0x1F
57#define DS2780_EEPROM_BLOCK0_START 0x20
58/* Register 0x20 - 0x2F User EEPROM */
59#define DS2780_EEPROM_BLOCK0_END 0x2F
60/* Register 0x30 - 0x5F Reserved */
61#define DS2780_EEPROM_BLOCK1_START 0x60
62#define DS2780_CONTROL_REG 0x60
63#define DS2780_AB_REG 0x61
64#define DS2780_AC_MSB_REG 0x62
65#define DS2780_AC_LSB_REG 0x63
66#define DS2780_VCHG_REG 0x64
67#define DS2780_IMIN_REG 0x65
68#define DS2780_VAE_REG 0x66
69#define DS2780_IAE_REG 0x67
70#define DS2780_AE_40_REG 0x68
71#define DS2780_RSNSP_REG 0x69
72#define DS2780_FULL_40_MSB_REG 0x6A
73#define DS2780_FULL_40_LSB_REG 0x6B
74#define DS2780_FULL_3040_SLOPE_REG 0x6C
75#define DS2780_FULL_2030_SLOPE_REG 0x6D
76#define DS2780_FULL_1020_SLOPE_REG 0x6E
77#define DS2780_FULL_0010_SLOPE_REG 0x6F
78#define DS2780_AE_3040_SLOPE_REG 0x70
79#define DS2780_AE_2030_SLOPE_REG 0x71
80#define DS2780_AE_1020_SLOPE_REG 0x72
81#define DS2780_AE_0010_SLOPE_REG 0x73
82#define DS2780_SE_3040_SLOPE_REG 0x74
83#define DS2780_SE_2030_SLOPE_REG 0x75
84#define DS2780_SE_1020_SLOPE_REG 0x76
85#define DS2780_SE_0010_SLOPE_REG 0x77
86#define DS2780_RSGAIN_MSB_REG 0x78
87#define DS2780_RSGAIN_LSB_REG 0x79
88#define DS2780_RSTC_REG 0x7A
89#define DS2780_FRSGAIN_MSB_REG 0x7B
90#define DS2780_FRSGAIN_LSB_REG 0x7C
91#define DS2780_EEPROM_BLOCK1_END 0x7C
92/* Register 0x7D - 0xFF Reserved */
93
94/* Number of valid register addresses */
95#define DS2780_DATA_SIZE 0x80
96
97/* Status register bits */
98#define DS2780_STATUS_REG_CHGTF (1 << 7)
99#define DS2780_STATUS_REG_AEF (1 << 6)
100#define DS2780_STATUS_REG_SEF (1 << 5)
101#define DS2780_STATUS_REG_LEARNF (1 << 4)
102/* Bit 3 Reserved */
103#define DS2780_STATUS_REG_UVF (1 << 2)
104#define DS2780_STATUS_REG_PORF (1 << 1)
105/* Bit 0 Reserved */
106
107/* Control register bits */
108/* Bit 7 Reserved */
109#define DS2780_CONTROL_REG_UVEN (1 << 6)
110#define DS2780_CONTROL_REG_PMOD (1 << 5)
111#define DS2780_CONTROL_REG_RNAOP (1 << 4)
112/* Bit 0 - 3 Reserved */
113
114/* Special feature register bits */
115/* Bit 1 - 7 Reserved */
116#define DS2780_SFR_REG_PIOSC (1 << 0)
117
118/* EEPROM register bits */
119#define DS2780_EEPROM_REG_EEC (1 << 7)
120#define DS2780_EEPROM_REG_LOCK (1 << 6)
121/* Bit 2 - 6 Reserved */
122#define DS2780_EEPROM_REG_BL1 (1 << 1)
123#define DS2780_EEPROM_REG_BL0 (1 << 0)
124
125extern int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
126 int io);
127extern int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd);
128
129#endif /* !_W1_DS2780_H */
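Multi-byte quantities in the map above (voltage, current, ACR, ...) occupy an MSB/LSB pair and are read as two consecutive bytes starting at the MSB address. A minimal sketch of converting the VOLT pair to microvolts, assuming the datasheet scaling of 4.88 mV per unit with the value left-justified in bits 15..5; the helper name is illustrative and the scaling should be checked against the DS2780 datasheet:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include "w1_ds2780.h"

/* Read VOLT_MSB/VOLT_LSB through w1_ds2780_io() and convert to microvolts. */
static int ds2780_read_voltage_uV(struct device *w1_dev, int *voltage_uV)
{
	char raw[2];
	s16 val;
	int ret;

	ret = w1_ds2780_io(w1_dev, raw, DS2780_VOLT_MSB_REG, sizeof(raw), 0);
	if (ret != sizeof(raw))
		return ret < 0 ? ret : -EIO;

	/* MSB first on the wire; the value sits in bits 15..5 (assumed scaling). */
	val = (s16)(((u8)raw[0] << 8) | (u8)raw[1]);
	*voltage_uV = (val / 32) * 4880;
	return 0;
}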
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index b7b5014ff714..10606c822756 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -827,7 +827,7 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
827 mutex_unlock(&w1_mlock); 827 mutex_unlock(&w1_mlock);
828} 828}
829 829
830static void w1_slave_found(struct w1_master *dev, u64 rn) 830void w1_slave_found(struct w1_master *dev, u64 rn)
831{ 831{
832 struct w1_slave *sl; 832 struct w1_slave *sl;
833 struct w1_reg_num *tmp; 833 struct w1_reg_num *tmp;
@@ -933,14 +933,15 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
933 } 933 }
934} 934}
935 935
936void w1_search_process(struct w1_master *dev, u8 search_type) 936void w1_search_process_cb(struct w1_master *dev, u8 search_type,
937 w1_slave_found_callback cb)
937{ 938{
938 struct w1_slave *sl, *sln; 939 struct w1_slave *sl, *sln;
939 940
940 list_for_each_entry(sl, &dev->slist, w1_slave_entry) 941 list_for_each_entry(sl, &dev->slist, w1_slave_entry)
941 clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags); 942 clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
942 943
943 w1_search_devices(dev, search_type, w1_slave_found); 944 w1_search_devices(dev, search_type, cb);
944 945
945 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { 946 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
946 if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl) 947 if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl)
@@ -953,6 +954,11 @@ void w1_search_process(struct w1_master *dev, u8 search_type)
953 dev->search_count--; 954 dev->search_count--;
954} 955}
955 956
957static void w1_search_process(struct w1_master *dev, u8 search_type)
958{
959 w1_search_process_cb(dev, search_type, w1_slave_found);
960}
961
956int w1_process(void *data) 962int w1_process(void *data)
957{ 963{
958 struct w1_master *dev = (struct w1_master *) data; 964 struct w1_master *dev = (struct w1_master *) data;
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index d8a9709f3449..1ce23fc6186c 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -55,6 +55,7 @@ struct w1_reg_num
55#define W1_READ_ROM 0x33 55#define W1_READ_ROM 0x33
56#define W1_READ_PSUPPLY 0xB4 56#define W1_READ_PSUPPLY 0xB4
57#define W1_MATCH_ROM 0x55 57#define W1_MATCH_ROM 0x55
58#define W1_RESUME_CMD 0xA5
58 59
59#define W1_SLAVE_ACTIVE 0 60#define W1_SLAVE_ACTIVE 0
60 61
@@ -193,7 +194,9 @@ void w1_destroy_master_attributes(struct w1_master *master);
193void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); 194void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
194void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); 195void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
195struct w1_slave *w1_search_slave(struct w1_reg_num *id); 196struct w1_slave *w1_search_slave(struct w1_reg_num *id);
196void w1_search_process(struct w1_master *dev, u8 search_type); 197void w1_slave_found(struct w1_master *dev, u64 rn);
198void w1_search_process_cb(struct w1_master *dev, u8 search_type,
199 w1_slave_found_callback cb);
197struct w1_master *w1_search_master_id(u32 id); 200struct w1_master *w1_search_master_id(u32 id);
198 201
199/* Disconnect and reconnect devices in the given family. Used for finding 202/* Disconnect and reconnect devices in the given family. Used for finding
@@ -213,6 +216,7 @@ void w1_write_block(struct w1_master *, const u8 *, int);
213void w1_touch_block(struct w1_master *, u8 *, int); 216void w1_touch_block(struct w1_master *, u8 *, int);
214u8 w1_read_block(struct w1_master *, u8 *, int); 217u8 w1_read_block(struct w1_master *, u8 *, int);
215int w1_reset_select_slave(struct w1_slave *sl); 218int w1_reset_select_slave(struct w1_slave *sl);
219int w1_reset_resume_command(struct w1_master *);
216void w1_next_pullup(struct w1_master *, int); 220void w1_next_pullup(struct w1_master *, int);
217 221
218static inline struct w1_slave* dev_to_w1_slave(struct device *dev) 222static inline struct w1_slave* dev_to_w1_slave(struct device *dev)
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index f3b636d7cafe..97479ae70b9c 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -34,8 +34,10 @@
34#define W1_THERM_DS1822 0x22 34#define W1_THERM_DS1822 0x22
35#define W1_EEPROM_DS2433 0x23 35#define W1_EEPROM_DS2433 0x23
36#define W1_THERM_DS18B20 0x28 36#define W1_THERM_DS18B20 0x28
37#define W1_FAMILY_DS2408 0x29
37#define W1_EEPROM_DS2431 0x2D 38#define W1_EEPROM_DS2431 0x2D
38#define W1_FAMILY_DS2760 0x30 39#define W1_FAMILY_DS2760 0x30
40#define W1_FAMILY_DS2780 0x32
39 41
40#define MAXNAMELEN 32 42#define MAXNAMELEN 32
41 43
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 3ebe9726a9e5..8e8b64cfafb6 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -390,6 +390,32 @@ int w1_reset_select_slave(struct w1_slave *sl)
390EXPORT_SYMBOL_GPL(w1_reset_select_slave); 390EXPORT_SYMBOL_GPL(w1_reset_select_slave);
391 391
392/** 392/**
393 * When the workflow with a slave amongst many requires several
394 * successive commands with a reset between each, this function is
395 * similar to doing a reset then a match ROM for the last matched ROM.
396 * The advantage is that the match ROM step is skipped in favor of the
397 * resume command. The slave must support the command, of course.
398 *
399 * If the bus has only one slave, the match ROM is traditionally skipped
400 * and a "SKIP ROM" is done for efficiency. On multi-slave buses this
401 * doesn't work, of course, but the resume command is the next best thing.
402 *
403 * The w1 master lock must be held.
404 *
405 * @param dev the master device
406 */
407int w1_reset_resume_command(struct w1_master *dev)
408{
409 if (w1_reset_bus(dev))
410 return -1;
411
412 /* This will make only the last matched slave perform a skip ROM. */
413 w1_write_8(dev, W1_RESUME_CMD);
414 return 0;
415}
416EXPORT_SYMBOL_GPL(w1_reset_resume_command);
417
418/**
393 * Put out a strong pull-up of the specified duration after the next write 419 * Put out a strong pull-up of the specified duration after the next write
394 * operation. Not all hardware supports strong pullups. Hardware that 420 * operation. Not all hardware supports strong pullups. Hardware that
395 * doesn't support strong pullups will sleep for the given time after the 421 * doesn't support strong pullups will sleep for the given time after the
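A minimal sketch of the workflow the comment above describes, assuming the slave implements W1_RESUME_CMD; the DS2780 opcodes and the write-then-copy pairing are used only to make the example concrete and are not taken from this patch:

#include <linux/errno.h>
#include <linux/mutex.h>
#include "../w1.h"		/* w1_slave, w1_write_8(), w1_reset_*() */
#include "w1_ds2780.h"		/* opcodes used for illustration only */

static int example_write_then_copy(struct w1_slave *sl, u8 addr,
				   const u8 *buf, int len)
{
	int ret = 0;

	mutex_lock(&sl->master->mutex);

	if (w1_reset_select_slave(sl)) {		/* reset + full 64-bit match ROM */
		ret = -EIO;
		goto out;
	}
	w1_write_8(sl->master, W1_DS2780_WRITE_DATA);
	w1_write_8(sl->master, addr);
	w1_write_block(sl->master, buf, len);

	if (w1_reset_resume_command(sl->master)) {	/* reset + resume, no ROM match */
		ret = -EIO;
		goto out;
	}
	w1_write_8(sl->master, W1_DS2780_COPY_DATA);
	w1_write_8(sl->master, addr);
out:
	mutex_unlock(&sl->master->mutex);
	return ret;
}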
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 7e667bc77ef2..55aabd927c60 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -55,6 +55,9 @@ static void w1_send_slave(struct w1_master *dev, u64 rn)
55 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); 55 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
56 int avail; 56 int avail;
57 57
58 /* update kernel slave list */
59 w1_slave_found(dev, rn);
60
58 avail = dev->priv_size - cmd->len; 61 avail = dev->priv_size - cmd->len;
59 62
60 if (avail > 8) { 63 if (avail > 8) {
@@ -85,7 +88,7 @@ static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
85 dev->priv = msg; 88 dev->priv = msg;
86 dev->priv_size = avail; 89 dev->priv_size = avail;
87 90
88 w1_search_devices(dev, search_type, w1_send_slave); 91 w1_search_process_cb(dev, search_type, w1_send_slave);
89 92
90 msg->ack = 0; 93 msg->ack = 0;
91 cn_netlink_send(msg, 0, GFP_KERNEL); 94 cn_netlink_send(msg, 0, GFP_KERNEL);
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 31610ea73aec..9b72dcf1cd25 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
7 extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ 7 extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
8 extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ 8 extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
9 export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ 9 export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \
10 compression.o delayed-ref.o relocation.o 10 compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 44ea5b92e1ba..f66fc9959733 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -288,7 +288,7 @@ int btrfs_acl_chmod(struct inode *inode)
288 return 0; 288 return 0;
289 289
290 acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); 290 acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
291 if (IS_ERR(acl) || !acl) 291 if (IS_ERR_OR_NULL(acl))
292 return PTR_ERR(acl); 292 return PTR_ERR(acl);
293 293
294 clone = posix_acl_clone(acl, GFP_KERNEL); 294 clone = posix_acl_clone(acl, GFP_KERNEL);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 57c3bb2884ce..93b1aa932014 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -22,6 +22,7 @@
22#include "extent_map.h" 22#include "extent_map.h"
23#include "extent_io.h" 23#include "extent_io.h"
24#include "ordered-data.h" 24#include "ordered-data.h"
25#include "delayed-inode.h"
25 26
26/* in memory btrfs inode */ 27/* in memory btrfs inode */
27struct btrfs_inode { 28struct btrfs_inode {
@@ -152,20 +153,34 @@ struct btrfs_inode {
152 unsigned ordered_data_close:1; 153 unsigned ordered_data_close:1;
153 unsigned orphan_meta_reserved:1; 154 unsigned orphan_meta_reserved:1;
154 unsigned dummy_inode:1; 155 unsigned dummy_inode:1;
156 unsigned in_defrag:1;
155 157
156 /* 158 /*
157 * always compress this one file 159 * always compress this one file
158 */ 160 */
159 unsigned force_compress:4; 161 unsigned force_compress:4;
160 162
163 struct btrfs_delayed_node *delayed_node;
164
161 struct inode vfs_inode; 165 struct inode vfs_inode;
162}; 166};
163 167
168extern unsigned char btrfs_filetype_table[];
169
164static inline struct btrfs_inode *BTRFS_I(struct inode *inode) 170static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
165{ 171{
166 return container_of(inode, struct btrfs_inode, vfs_inode); 172 return container_of(inode, struct btrfs_inode, vfs_inode);
167} 173}
168 174
175static inline u64 btrfs_ino(struct inode *inode)
176{
177 u64 ino = BTRFS_I(inode)->location.objectid;
178
179 if (ino <= BTRFS_FIRST_FREE_OBJECTID)
180 ino = inode->i_ino;
181 return ino;
182}
183
169static inline void btrfs_i_size_write(struct inode *inode, u64 size) 184static inline void btrfs_i_size_write(struct inode *inode, u64 size)
170{ 185{
171 i_size_write(inode, size); 186 i_size_write(inode, size);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 41d1d7c70e29..bfe42b03eaf9 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode,
125 kunmap_atomic(kaddr, KM_USER0); 125 kunmap_atomic(kaddr, KM_USER0);
126 126
127 if (csum != *cb_sum) { 127 if (csum != *cb_sum) {
128 printk(KERN_INFO "btrfs csum failed ino %lu " 128 printk(KERN_INFO "btrfs csum failed ino %llu "
129 "extent %llu csum %u " 129 "extent %llu csum %u "
130 "wanted %u mirror %d\n", inode->i_ino, 130 "wanted %u mirror %d\n",
131 (unsigned long long)btrfs_ino(inode),
131 (unsigned long long)disk_start, 132 (unsigned long long)disk_start,
132 csum, *cb_sum, cb->mirror_num); 133 csum, *cb_sum, cb->mirror_num);
133 ret = -EIO; 134 ret = -EIO;
@@ -332,7 +333,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
332 struct compressed_bio *cb; 333 struct compressed_bio *cb;
333 unsigned long bytes_left; 334 unsigned long bytes_left;
334 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 335 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
335 int page_index = 0; 336 int pg_index = 0;
336 struct page *page; 337 struct page *page;
337 u64 first_byte = disk_start; 338 u64 first_byte = disk_start;
338 struct block_device *bdev; 339 struct block_device *bdev;
@@ -366,8 +367,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
366 367
367 /* create and submit bios for the compressed pages */ 368 /* create and submit bios for the compressed pages */
368 bytes_left = compressed_len; 369 bytes_left = compressed_len;
369 for (page_index = 0; page_index < cb->nr_pages; page_index++) { 370 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
370 page = compressed_pages[page_index]; 371 page = compressed_pages[pg_index];
371 page->mapping = inode->i_mapping; 372 page->mapping = inode->i_mapping;
372 if (bio->bi_size) 373 if (bio->bi_size)
373 ret = io_tree->ops->merge_bio_hook(page, 0, 374 ret = io_tree->ops->merge_bio_hook(page, 0,
@@ -432,7 +433,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
432 struct compressed_bio *cb) 433 struct compressed_bio *cb)
433{ 434{
434 unsigned long end_index; 435 unsigned long end_index;
435 unsigned long page_index; 436 unsigned long pg_index;
436 u64 last_offset; 437 u64 last_offset;
437 u64 isize = i_size_read(inode); 438 u64 isize = i_size_read(inode);
438 int ret; 439 int ret;
@@ -456,13 +457,13 @@ static noinline int add_ra_bio_pages(struct inode *inode,
456 end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 457 end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
457 458
458 while (last_offset < compressed_end) { 459 while (last_offset < compressed_end) {
459 page_index = last_offset >> PAGE_CACHE_SHIFT; 460 pg_index = last_offset >> PAGE_CACHE_SHIFT;
460 461
461 if (page_index > end_index) 462 if (pg_index > end_index)
462 break; 463 break;
463 464
464 rcu_read_lock(); 465 rcu_read_lock();
465 page = radix_tree_lookup(&mapping->page_tree, page_index); 466 page = radix_tree_lookup(&mapping->page_tree, pg_index);
466 rcu_read_unlock(); 467 rcu_read_unlock();
467 if (page) { 468 if (page) {
468 misses++; 469 misses++;
@@ -476,7 +477,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
476 if (!page) 477 if (!page)
477 break; 478 break;
478 479
479 if (add_to_page_cache_lru(page, mapping, page_index, 480 if (add_to_page_cache_lru(page, mapping, pg_index,
480 GFP_NOFS)) { 481 GFP_NOFS)) {
481 page_cache_release(page); 482 page_cache_release(page);
482 goto next; 483 goto next;
@@ -560,7 +561,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
560 unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; 561 unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
561 unsigned long compressed_len; 562 unsigned long compressed_len;
562 unsigned long nr_pages; 563 unsigned long nr_pages;
563 unsigned long page_index; 564 unsigned long pg_index;
564 struct page *page; 565 struct page *page;
565 struct block_device *bdev; 566 struct block_device *bdev;
566 struct bio *comp_bio; 567 struct bio *comp_bio;
@@ -613,10 +614,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
613 614
614 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; 615 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
615 616
616 for (page_index = 0; page_index < nr_pages; page_index++) { 617 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
617 cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | 618 cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
618 __GFP_HIGHMEM); 619 __GFP_HIGHMEM);
619 if (!cb->compressed_pages[page_index]) 620 if (!cb->compressed_pages[pg_index])
620 goto fail2; 621 goto fail2;
621 } 622 }
622 cb->nr_pages = nr_pages; 623 cb->nr_pages = nr_pages;
@@ -634,8 +635,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
634 comp_bio->bi_end_io = end_compressed_bio_read; 635 comp_bio->bi_end_io = end_compressed_bio_read;
635 atomic_inc(&cb->pending_bios); 636 atomic_inc(&cb->pending_bios);
636 637
637 for (page_index = 0; page_index < nr_pages; page_index++) { 638 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
638 page = cb->compressed_pages[page_index]; 639 page = cb->compressed_pages[pg_index];
639 page->mapping = inode->i_mapping; 640 page->mapping = inode->i_mapping;
640 page->index = em_start >> PAGE_CACHE_SHIFT; 641 page->index = em_start >> PAGE_CACHE_SHIFT;
641 642
@@ -702,8 +703,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
702 return 0; 703 return 0;
703 704
704fail2: 705fail2:
705 for (page_index = 0; page_index < nr_pages; page_index++) 706 for (pg_index = 0; pg_index < nr_pages; pg_index++)
706 free_page((unsigned long)cb->compressed_pages[page_index]); 707 free_page((unsigned long)cb->compressed_pages[pg_index]);
707 708
708 kfree(cb->compressed_pages); 709 kfree(cb->compressed_pages);
709fail1: 710fail1:
@@ -945,7 +946,7 @@ void btrfs_exit_compress(void)
945int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, 946int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
946 unsigned long total_out, u64 disk_start, 947 unsigned long total_out, u64 disk_start,
947 struct bio_vec *bvec, int vcnt, 948 struct bio_vec *bvec, int vcnt,
948 unsigned long *page_index, 949 unsigned long *pg_index,
949 unsigned long *pg_offset) 950 unsigned long *pg_offset)
950{ 951{
951 unsigned long buf_offset; 952 unsigned long buf_offset;
@@ -954,7 +955,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
954 unsigned long working_bytes = total_out - buf_start; 955 unsigned long working_bytes = total_out - buf_start;
955 unsigned long bytes; 956 unsigned long bytes;
956 char *kaddr; 957 char *kaddr;
957 struct page *page_out = bvec[*page_index].bv_page; 958 struct page *page_out = bvec[*pg_index].bv_page;
958 959
959 /* 960 /*
960 * start byte is the first byte of the page we're currently 961 * start byte is the first byte of the page we're currently
@@ -995,11 +996,11 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
995 996
996 /* check if we need to pick another page */ 997 /* check if we need to pick another page */
997 if (*pg_offset == PAGE_CACHE_SIZE) { 998 if (*pg_offset == PAGE_CACHE_SIZE) {
998 (*page_index)++; 999 (*pg_index)++;
999 if (*page_index >= vcnt) 1000 if (*pg_index >= vcnt)
1000 return 0; 1001 return 0;
1001 1002
1002 page_out = bvec[*page_index].bv_page; 1003 page_out = bvec[*pg_index].bv_page;
1003 *pg_offset = 0; 1004 *pg_offset = 0;
1004 start_byte = page_offset(page_out) - disk_start; 1005 start_byte = page_offset(page_out) - disk_start;
1005 1006
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 51000174b9d7..a12059f4f0fd 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -37,7 +37,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
37int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, 37int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
38 unsigned long total_out, u64 disk_start, 38 unsigned long total_out, u64 disk_start,
39 struct bio_vec *bvec, int vcnt, 39 struct bio_vec *bvec, int vcnt,
40 unsigned long *page_index, 40 unsigned long *pg_index,
41 unsigned long *pg_offset); 41 unsigned long *pg_offset);
42 42
43int btrfs_submit_compressed_write(struct inode *inode, u64 start, 43int btrfs_submit_compressed_write(struct inode *inode, u64 start,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 84d7ca1fe0ba..b0e18d986e0a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -38,11 +38,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
38 struct extent_buffer *src_buf); 38 struct extent_buffer *src_buf);
39static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 39static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
40 struct btrfs_path *path, int level, int slot); 40 struct btrfs_path *path, int level, int slot);
41static int setup_items_for_insert(struct btrfs_trans_handle *trans,
42 struct btrfs_root *root, struct btrfs_path *path,
43 struct btrfs_key *cpu_key, u32 *data_size,
44 u32 total_data, u32 total_size, int nr);
45
46 41
47struct btrfs_path *btrfs_alloc_path(void) 42struct btrfs_path *btrfs_alloc_path(void)
48{ 43{
@@ -107,7 +102,7 @@ void btrfs_free_path(struct btrfs_path *p)
107{ 102{
108 if (!p) 103 if (!p)
109 return; 104 return;
110 btrfs_release_path(NULL, p); 105 btrfs_release_path(p);
111 kmem_cache_free(btrfs_path_cachep, p); 106 kmem_cache_free(btrfs_path_cachep, p);
112} 107}
113 108
@@ -117,7 +112,7 @@ void btrfs_free_path(struct btrfs_path *p)
117 * 112 *
118 * It is safe to call this on paths that no locks or extent buffers held. 113 * It is safe to call this on paths that no locks or extent buffers held.
119 */ 114 */
120noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) 115noinline void btrfs_release_path(struct btrfs_path *p)
121{ 116{
122 int i; 117 int i;
123 118
@@ -1328,7 +1323,7 @@ static noinline int reada_for_balance(struct btrfs_root *root,
1328 ret = -EAGAIN; 1323 ret = -EAGAIN;
1329 1324
1330 /* release the whole path */ 1325 /* release the whole path */
1331 btrfs_release_path(root, path); 1326 btrfs_release_path(path);
1332 1327
1333 /* read the blocks */ 1328 /* read the blocks */
1334 if (block1) 1329 if (block1)
@@ -1475,7 +1470,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
1475 return 0; 1470 return 0;
1476 } 1471 }
1477 free_extent_buffer(tmp); 1472 free_extent_buffer(tmp);
1478 btrfs_release_path(NULL, p); 1473 btrfs_release_path(p);
1479 return -EIO; 1474 return -EIO;
1480 } 1475 }
1481 } 1476 }
@@ -1494,7 +1489,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
1494 if (p->reada) 1489 if (p->reada)
1495 reada_for_search(root, p, level, slot, key->objectid); 1490 reada_for_search(root, p, level, slot, key->objectid);
1496 1491
1497 btrfs_release_path(NULL, p); 1492 btrfs_release_path(p);
1498 1493
1499 ret = -EAGAIN; 1494 ret = -EAGAIN;
1500 tmp = read_tree_block(root, blocknr, blocksize, 0); 1495 tmp = read_tree_block(root, blocknr, blocksize, 0);
@@ -1563,7 +1558,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
1563 } 1558 }
1564 b = p->nodes[level]; 1559 b = p->nodes[level];
1565 if (!b) { 1560 if (!b) {
1566 btrfs_release_path(NULL, p); 1561 btrfs_release_path(p);
1567 goto again; 1562 goto again;
1568 } 1563 }
1569 BUG_ON(btrfs_header_nritems(b) == 1); 1564 BUG_ON(btrfs_header_nritems(b) == 1);
@@ -1753,7 +1748,7 @@ done:
1753 if (!p->leave_spinning) 1748 if (!p->leave_spinning)
1754 btrfs_set_path_blocking(p); 1749 btrfs_set_path_blocking(p);
1755 if (ret < 0) 1750 if (ret < 0)
1756 btrfs_release_path(root, p); 1751 btrfs_release_path(p);
1757 return ret; 1752 return ret;
1758} 1753}
1759 1754
@@ -3026,7 +3021,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3026 struct btrfs_file_extent_item); 3021 struct btrfs_file_extent_item);
3027 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3022 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3028 } 3023 }
3029 btrfs_release_path(root, path); 3024 btrfs_release_path(path);
3030 3025
3031 path->keep_locks = 1; 3026 path->keep_locks = 1;
3032 path->search_for_split = 1; 3027 path->search_for_split = 1;
@@ -3216,7 +3211,6 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3216 struct btrfs_path *path, 3211 struct btrfs_path *path,
3217 u32 new_size, int from_end) 3212 u32 new_size, int from_end)
3218{ 3213{
3219 int ret = 0;
3220 int slot; 3214 int slot;
3221 struct extent_buffer *leaf; 3215 struct extent_buffer *leaf;
3222 struct btrfs_item *item; 3216 struct btrfs_item *item;
@@ -3314,12 +3308,11 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3314 btrfs_set_item_size(leaf, item, new_size); 3308 btrfs_set_item_size(leaf, item, new_size);
3315 btrfs_mark_buffer_dirty(leaf); 3309 btrfs_mark_buffer_dirty(leaf);
3316 3310
3317 ret = 0;
3318 if (btrfs_leaf_free_space(root, leaf) < 0) { 3311 if (btrfs_leaf_free_space(root, leaf) < 0) {
3319 btrfs_print_leaf(root, leaf); 3312 btrfs_print_leaf(root, leaf);
3320 BUG(); 3313 BUG();
3321 } 3314 }
3322 return ret; 3315 return 0;
3323} 3316}
3324 3317
3325/* 3318/*
@@ -3329,7 +3322,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
3329 struct btrfs_root *root, struct btrfs_path *path, 3322 struct btrfs_root *root, struct btrfs_path *path,
3330 u32 data_size) 3323 u32 data_size)
3331{ 3324{
3332 int ret = 0;
3333 int slot; 3325 int slot;
3334 struct extent_buffer *leaf; 3326 struct extent_buffer *leaf;
3335 struct btrfs_item *item; 3327 struct btrfs_item *item;
@@ -3394,12 +3386,11 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
3394 btrfs_set_item_size(leaf, item, old_size + data_size); 3386 btrfs_set_item_size(leaf, item, old_size + data_size);
3395 btrfs_mark_buffer_dirty(leaf); 3387 btrfs_mark_buffer_dirty(leaf);
3396 3388
3397 ret = 0;
3398 if (btrfs_leaf_free_space(root, leaf) < 0) { 3389 if (btrfs_leaf_free_space(root, leaf) < 0) {
3399 btrfs_print_leaf(root, leaf); 3390 btrfs_print_leaf(root, leaf);
3400 BUG(); 3391 BUG();
3401 } 3392 }
3402 return ret; 3393 return 0;
3403} 3394}
3404 3395
3405/* 3396/*
@@ -3559,11 +3550,10 @@ out:
3559 * to save stack depth by doing the bulk of the work in a function 3550 * to save stack depth by doing the bulk of the work in a function
3560 * that doesn't call btrfs_search_slot 3551 * that doesn't call btrfs_search_slot
3561 */ 3552 */
3562static noinline_for_stack int 3553int setup_items_for_insert(struct btrfs_trans_handle *trans,
3563setup_items_for_insert(struct btrfs_trans_handle *trans, 3554 struct btrfs_root *root, struct btrfs_path *path,
3564 struct btrfs_root *root, struct btrfs_path *path, 3555 struct btrfs_key *cpu_key, u32 *data_size,
3565 struct btrfs_key *cpu_key, u32 *data_size, 3556 u32 total_data, u32 total_size, int nr)
3566 u32 total_data, u32 total_size, int nr)
3567{ 3557{
3568 struct btrfs_item *item; 3558 struct btrfs_item *item;
3569 int i; 3559 int i;
@@ -3647,7 +3637,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
3647 3637
3648 ret = 0; 3638 ret = 0;
3649 if (slot == 0) { 3639 if (slot == 0) {
3650 struct btrfs_disk_key disk_key;
3651 btrfs_cpu_key_to_disk(&disk_key, cpu_key); 3640 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3652 ret = fixup_low_keys(trans, root, path, &disk_key, 1); 3641 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3653 } 3642 }
@@ -3949,7 +3938,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3949 else 3938 else
3950 return 1; 3939 return 1;
3951 3940
3952 btrfs_release_path(root, path); 3941 btrfs_release_path(path);
3953 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3942 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3954 if (ret < 0) 3943 if (ret < 0)
3955 return ret; 3944 return ret;
@@ -4073,7 +4062,7 @@ find_next_key:
4073 sret = btrfs_find_next_key(root, path, min_key, level, 4062 sret = btrfs_find_next_key(root, path, min_key, level,
4074 cache_only, min_trans); 4063 cache_only, min_trans);
4075 if (sret == 0) { 4064 if (sret == 0) {
4076 btrfs_release_path(root, path); 4065 btrfs_release_path(path);
4077 goto again; 4066 goto again;
4078 } else { 4067 } else {
4079 goto out; 4068 goto out;
@@ -4152,7 +4141,7 @@ next:
4152 btrfs_node_key_to_cpu(c, &cur_key, slot); 4141 btrfs_node_key_to_cpu(c, &cur_key, slot);
4153 4142
4154 orig_lowest = path->lowest_level; 4143 orig_lowest = path->lowest_level;
4155 btrfs_release_path(root, path); 4144 btrfs_release_path(path);
4156 path->lowest_level = level; 4145 path->lowest_level = level;
4157 ret = btrfs_search_slot(NULL, root, &cur_key, path, 4146 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4158 0, 0); 4147 0, 0);
@@ -4229,7 +4218,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4229again: 4218again:
4230 level = 1; 4219 level = 1;
4231 next = NULL; 4220 next = NULL;
4232 btrfs_release_path(root, path); 4221 btrfs_release_path(path);
4233 4222
4234 path->keep_locks = 1; 4223 path->keep_locks = 1;
4235 4224
@@ -4285,7 +4274,7 @@ again:
4285 goto again; 4274 goto again;
4286 4275
4287 if (ret < 0) { 4276 if (ret < 0) {
4288 btrfs_release_path(root, path); 4277 btrfs_release_path(path);
4289 goto done; 4278 goto done;
4290 } 4279 }
4291 4280
@@ -4324,7 +4313,7 @@ again:
4324 goto again; 4313 goto again;
4325 4314
4326 if (ret < 0) { 4315 if (ret < 0) {
4327 btrfs_release_path(root, path); 4316 btrfs_release_path(path);
4328 goto done; 4317 goto done;
4329 } 4318 }
4330 4319
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8f4b81de3ae2..332323e19dd1 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -23,6 +23,7 @@
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/highmem.h> 24#include <linux/highmem.h>
25#include <linux/fs.h> 25#include <linux/fs.h>
26#include <linux/rwsem.h>
26#include <linux/completion.h> 27#include <linux/completion.h>
27#include <linux/backing-dev.h> 28#include <linux/backing-dev.h>
28#include <linux/wait.h> 29#include <linux/wait.h>
@@ -33,6 +34,7 @@
33#include "extent_io.h" 34#include "extent_io.h"
34#include "extent_map.h" 35#include "extent_map.h"
35#include "async-thread.h" 36#include "async-thread.h"
37#include "ioctl.h"
36 38
37struct btrfs_trans_handle; 39struct btrfs_trans_handle;
38struct btrfs_transaction; 40struct btrfs_transaction;
@@ -105,6 +107,12 @@ struct btrfs_ordered_sum;
105/* For storing free space cache */ 107/* For storing free space cache */
106#define BTRFS_FREE_SPACE_OBJECTID -11ULL 108#define BTRFS_FREE_SPACE_OBJECTID -11ULL
107 109
110/*
111 * The inode number assigned to the special inode for storing
112 * free ino cache
113 */
114#define BTRFS_FREE_INO_OBJECTID -12ULL
115
108/* dummy objectid represents multiple objectids */ 116/* dummy objectid represents multiple objectids */
109#define BTRFS_MULTIPLE_OBJECTIDS -255ULL 117#define BTRFS_MULTIPLE_OBJECTIDS -255ULL
110 118
@@ -187,7 +195,6 @@ struct btrfs_mapping_tree {
187 struct extent_map_tree map_tree; 195 struct extent_map_tree map_tree;
188}; 196};
189 197
190#define BTRFS_UUID_SIZE 16
191struct btrfs_dev_item { 198struct btrfs_dev_item {
192 /* the internal btrfs device id */ 199 /* the internal btrfs device id */
193 __le64 devid; 200 __le64 devid;
@@ -294,7 +301,6 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
294 sizeof(struct btrfs_stripe) * (num_stripes - 1); 301 sizeof(struct btrfs_stripe) * (num_stripes - 1);
295} 302}
296 303
297#define BTRFS_FSID_SIZE 16
298#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) 304#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
299#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) 305#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1)
300 306
@@ -510,6 +516,12 @@ struct btrfs_extent_item_v0 {
510/* use full backrefs for extent pointers in the block */ 516/* use full backrefs for extent pointers in the block */
511#define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) 517#define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8)
512 518
519/*
520 * This flag is only used internally by scrub and may be changed at any
521 * time; it is only declared here to avoid collisions.
522 */
523#define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48)
524
513struct btrfs_tree_block_info { 525struct btrfs_tree_block_info {
514 struct btrfs_disk_key key; 526 struct btrfs_disk_key key;
515 u8 level; 527 u8 level;
@@ -740,12 +752,12 @@ struct btrfs_space_info {
740 */ 752 */
741 unsigned long reservation_progress; 753 unsigned long reservation_progress;
742 754
743 int full:1; /* indicates that we cannot allocate any more 755 unsigned int full:1; /* indicates that we cannot allocate any more
744 chunks for this space */ 756 chunks for this space */
745 int chunk_alloc:1; /* set if we are allocating a chunk */ 757 unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
746 758
747 int force_alloc; /* set if we need to force a chunk alloc for 759 unsigned int force_alloc; /* set if we need to force a chunk
748 this space */ 760 alloc for this space */
749 761
750 struct list_head list; 762 struct list_head list;
751 763
@@ -830,9 +842,6 @@ struct btrfs_block_group_cache {
830 u64 bytes_super; 842 u64 bytes_super;
831 u64 flags; 843 u64 flags;
832 u64 sectorsize; 844 u64 sectorsize;
833 int extents_thresh;
834 int free_extents;
835 int total_bitmaps;
836 unsigned int ro:1; 845 unsigned int ro:1;
837 unsigned int dirty:1; 846 unsigned int dirty:1;
838 unsigned int iref:1; 847 unsigned int iref:1;
@@ -847,9 +856,7 @@ struct btrfs_block_group_cache {
847 struct btrfs_space_info *space_info; 856 struct btrfs_space_info *space_info;
848 857
849 /* free space cache stuff */ 858 /* free space cache stuff */
850 spinlock_t tree_lock; 859 struct btrfs_free_space_ctl *free_space_ctl;
851 struct rb_root free_space_offset;
852 u64 free_space;
853 860
854 /* block group cache stuff */ 861 /* block group cache stuff */
855 struct rb_node cache_node; 862 struct rb_node cache_node;
@@ -869,6 +876,7 @@ struct btrfs_block_group_cache {
869struct reloc_control; 876struct reloc_control;
870struct btrfs_device; 877struct btrfs_device;
871struct btrfs_fs_devices; 878struct btrfs_fs_devices;
879struct btrfs_delayed_root;
872struct btrfs_fs_info { 880struct btrfs_fs_info {
873 u8 fsid[BTRFS_FSID_SIZE]; 881 u8 fsid[BTRFS_FSID_SIZE];
874 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 882 u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
@@ -895,7 +903,10 @@ struct btrfs_fs_info {
895 /* logical->physical extent mapping */ 903 /* logical->physical extent mapping */
896 struct btrfs_mapping_tree mapping_tree; 904 struct btrfs_mapping_tree mapping_tree;
897 905
898 /* block reservation for extent, checksum and root tree */ 906 /*
907 * block reservation for extent, checksum, root tree and
908 * delayed dir index item
909 */
899 struct btrfs_block_rsv global_block_rsv; 910 struct btrfs_block_rsv global_block_rsv;
900 /* block reservation for delay allocation */ 911 /* block reservation for delay allocation */
901 struct btrfs_block_rsv delalloc_block_rsv; 912 struct btrfs_block_rsv delalloc_block_rsv;
@@ -1022,6 +1033,7 @@ struct btrfs_fs_info {
1022 * for the sys_munmap function call path 1033 * for the sys_munmap function call path
1023 */ 1034 */
1024 struct btrfs_workers fixup_workers; 1035 struct btrfs_workers fixup_workers;
1036 struct btrfs_workers delayed_workers;
1025 struct task_struct *transaction_kthread; 1037 struct task_struct *transaction_kthread;
1026 struct task_struct *cleaner_kthread; 1038 struct task_struct *cleaner_kthread;
1027 int thread_pool_size; 1039 int thread_pool_size;
@@ -1062,6 +1074,11 @@ struct btrfs_fs_info {
1062 /* all metadata allocations go through this cluster */ 1074 /* all metadata allocations go through this cluster */
1063 struct btrfs_free_cluster meta_alloc_cluster; 1075 struct btrfs_free_cluster meta_alloc_cluster;
1064 1076
1077 /* auto defrag inodes go here */
1078 spinlock_t defrag_inodes_lock;
1079 struct rb_root defrag_inodes;
1080 atomic_t defrag_running;
1081
1065 spinlock_t ref_cache_lock; 1082 spinlock_t ref_cache_lock;
1066 u64 total_ref_cache_size; 1083 u64 total_ref_cache_size;
1067 1084
@@ -1077,8 +1094,21 @@ struct btrfs_fs_info {
1077 1094
1078 void *bdev_holder; 1095 void *bdev_holder;
1079 1096
1097 /* private scrub information */
1098 struct mutex scrub_lock;
1099 atomic_t scrubs_running;
1100 atomic_t scrub_pause_req;
1101 atomic_t scrubs_paused;
1102 atomic_t scrub_cancel_req;
1103 wait_queue_head_t scrub_pause_wait;
1104 struct rw_semaphore scrub_super_lock;
1105 int scrub_workers_refcnt;
1106 struct btrfs_workers scrub_workers;
1107
1080 /* filesystem state */ 1108 /* filesystem state */
1081 u64 fs_state; 1109 u64 fs_state;
1110
1111 struct btrfs_delayed_root *delayed_root;
1082}; 1112};
1083 1113
1084/* 1114/*
@@ -1088,9 +1118,6 @@ struct btrfs_fs_info {
1088struct btrfs_root { 1118struct btrfs_root {
1089 struct extent_buffer *node; 1119 struct extent_buffer *node;
1090 1120
1091 /* the node lock is held while changing the node pointer */
1092 spinlock_t node_lock;
1093
1094 struct extent_buffer *commit_root; 1121 struct extent_buffer *commit_root;
1095 struct btrfs_root *log_root; 1122 struct btrfs_root *log_root;
1096 struct btrfs_root *reloc_root; 1123 struct btrfs_root *reloc_root;
@@ -1107,6 +1134,16 @@ struct btrfs_root {
1107 spinlock_t accounting_lock; 1134 spinlock_t accounting_lock;
1108 struct btrfs_block_rsv *block_rsv; 1135 struct btrfs_block_rsv *block_rsv;
1109 1136
1137 /* free ino cache stuff */
1138 struct mutex fs_commit_mutex;
1139 struct btrfs_free_space_ctl *free_ino_ctl;
1140 enum btrfs_caching_type cached;
1141 spinlock_t cache_lock;
1142 wait_queue_head_t cache_wait;
1143 struct btrfs_free_space_ctl *free_ino_pinned;
1144 u64 cache_progress;
1145 struct inode *cache_inode;
1146
1110 struct mutex log_mutex; 1147 struct mutex log_mutex;
1111 wait_queue_head_t log_writer_wait; 1148 wait_queue_head_t log_writer_wait;
1112 wait_queue_head_t log_commit_wait[2]; 1149 wait_queue_head_t log_commit_wait[2];
@@ -1162,12 +1199,49 @@ struct btrfs_root {
1162 struct rb_root inode_tree; 1199 struct rb_root inode_tree;
1163 1200
1164 /* 1201 /*
1202 * radix tree that keeps track of delayed nodes of every inode,
1203 * protected by inode_lock
1204 */
1205 struct radix_tree_root delayed_nodes_tree;
1206 /*
1165 * right now this just gets used so that a root has its own devid 1207 * right now this just gets used so that a root has its own devid
1166 * for stat. It may be used for more later 1208 * for stat. It may be used for more later
1167 */ 1209 */
1168 struct super_block anon_super; 1210 struct super_block anon_super;
1169}; 1211};
1170 1212
1213struct btrfs_ioctl_defrag_range_args {
1214 /* start of the defrag operation */
1215 __u64 start;
1216
1217 /* number of bytes to defrag, use (u64)-1 to say all */
1218 __u64 len;
1219
1220 /*
1221 * flags for the operation, which can include turning
1222 * on compression for this one defrag
1223 */
1224 __u64 flags;
1225
1226 /*
1227 * any extent bigger than this will be considered
1228 * already defragged. Use 0 to take the kernel default
1229 * Use 1 to say every single extent must be rewritten
1230 */
1231 __u32 extent_thresh;
1232
1233 /*
1234 * which compression method to use if turning on compression
1235 * for this defrag operation. If unspecified, zlib will
1236 * be used
1237 */
1238 __u32 compress_type;
1239
1240 /* spare for later */
1241 __u32 unused[4];
1242};
1243
1244
1171/* 1245/*
1172 * inode items have the data typically returned from stat and store other 1246 * inode items have the data typically returned from stat and store other
1173 * info about object characteristics. There is one for every file and dir in 1247 * info about object characteristics. There is one for every file and dir in
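A minimal userspace sketch of filling the defrag-range arguments introduced above, assuming the BTRFS_IOC_DEFRAG_RANGE ioctl and the BTRFS_DEFRAG_RANGE_COMPRESS flag as declared in fs/btrfs/ioctl.h; verify both against that header before relying on this:

#include <string.h>
#include <sys/ioctl.h>
/* struct btrfs_ioctl_defrag_range_args, BTRFS_IOC_DEFRAG_RANGE and
 * BTRFS_DEFRAG_RANGE_COMPRESS are assumed to come from a copy of the
 * btrfs ioctl header. */

/* Defragment and recompress the first 128 MiB of an already open file. */
static int defrag_first_128m(int fd)
{
	struct btrfs_ioctl_defrag_range_args args;

	memset(&args, 0, sizeof(args));
	args.start = 0;
	args.len = 128ULL << 20;	/* only the first 128 MiB */
	args.extent_thresh = 0;		/* 0 = kernel default threshold */
	args.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
	args.compress_type = 0;		/* unspecified => zlib, per the field comment */

	return ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &args);
}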
@@ -1265,6 +1339,7 @@ struct btrfs_root {
1265#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) 1339#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13)
1266#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) 1340#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
1267#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) 1341#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
1342#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16)
1268 1343
1269#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 1344#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
1270#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 1345#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -1440,26 +1515,12 @@ static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb,
1440 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); 1515 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
1441} 1516}
1442 1517
1443static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb,
1444 struct btrfs_chunk *c, int nr,
1445 u64 val)
1446{
1447 btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val);
1448}
1449
1450static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, 1518static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
1451 struct btrfs_chunk *c, int nr) 1519 struct btrfs_chunk *c, int nr)
1452{ 1520{
1453 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); 1521 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
1454} 1522}
1455 1523
1456static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb,
1457 struct btrfs_chunk *c, int nr,
1458 u64 val)
1459{
1460 btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
1461}
1462
1463/* struct btrfs_block_group_item */ 1524/* struct btrfs_block_group_item */
1464BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, 1525BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
1465 used, 64); 1526 used, 64);
@@ -1517,14 +1578,6 @@ btrfs_inode_ctime(struct btrfs_inode_item *inode_item)
1517 return (struct btrfs_timespec *)ptr; 1578 return (struct btrfs_timespec *)ptr;
1518} 1579}
1519 1580
1520static inline struct btrfs_timespec *
1521btrfs_inode_otime(struct btrfs_inode_item *inode_item)
1522{
1523 unsigned long ptr = (unsigned long)inode_item;
1524 ptr += offsetof(struct btrfs_inode_item, otime);
1525 return (struct btrfs_timespec *)ptr;
1526}
1527
1528BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); 1581BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
1529BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); 1582BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
1530 1583
@@ -1875,33 +1928,6 @@ static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
1875 return (u8 *)ptr; 1928 return (u8 *)ptr;
1876} 1929}
1877 1930
1878static inline u8 *btrfs_super_fsid(struct extent_buffer *eb)
1879{
1880 unsigned long ptr = offsetof(struct btrfs_super_block, fsid);
1881 return (u8 *)ptr;
1882}
1883
1884static inline u8 *btrfs_header_csum(struct extent_buffer *eb)
1885{
1886 unsigned long ptr = offsetof(struct btrfs_header, csum);
1887 return (u8 *)ptr;
1888}
1889
1890static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb)
1891{
1892 return NULL;
1893}
1894
1895static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb)
1896{
1897 return NULL;
1898}
1899
1900static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb)
1901{
1902 return NULL;
1903}
1904
1905static inline int btrfs_is_leaf(struct extent_buffer *eb) 1931static inline int btrfs_is_leaf(struct extent_buffer *eb)
1906{ 1932{
1907 return btrfs_header_level(eb) == 0; 1933 return btrfs_header_level(eb) == 0;
@@ -2055,22 +2081,6 @@ static inline struct btrfs_root *btrfs_sb(struct super_block *sb)
2055 return sb->s_fs_info; 2081 return sb->s_fs_info;
2056} 2082}
2057 2083
2058static inline int btrfs_set_root_name(struct btrfs_root *root,
2059 const char *name, int len)
2060{
2061 /* if we already have a name just free it */
2062 kfree(root->name);
2063
2064 root->name = kmalloc(len+1, GFP_KERNEL);
2065 if (!root->name)
2066 return -ENOMEM;
2067
2068 memcpy(root->name, name, len);
2069 root->name[len] = '\0';
2070
2071 return 0;
2072}
2073
2074static inline u32 btrfs_level_size(struct btrfs_root *root, int level) 2084static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
2075{ 2085{
2076 if (level == 0) 2086 if (level == 0)
@@ -2099,6 +2109,13 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
2099} 2109}
2100 2110
2101/* extent-tree.c */ 2111/* extent-tree.c */
2112static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
2113 int num_items)
2114{
2115 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
2116 3 * num_items;
2117}
2118
2102void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 2119void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
2103int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2120int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2104 struct btrfs_root *root, unsigned long count); 2121 struct btrfs_root *root, unsigned long count);
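A worked number for the reservation formula above, assuming the common 4 KiB leaf and node size and the in-tree BTRFS_MAX_LEVEL of 8:

/*
 * (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1)) * 3 * num_items
 *   = (4096 + 4096 * 7) * 3 * 1
 *   = 32768 * 3
 *   = 98304 bytes (96 KiB) reserved per item in the worst case.
 */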
@@ -2108,12 +2125,9 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
2108 u64 num_bytes, u64 *refs, u64 *flags); 2125 u64 num_bytes, u64 *refs, u64 *flags);
2109int btrfs_pin_extent(struct btrfs_root *root, 2126int btrfs_pin_extent(struct btrfs_root *root,
2110 u64 bytenr, u64 num, int reserved); 2127 u64 bytenr, u64 num, int reserved);
2111int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
2112 struct btrfs_root *root, struct extent_buffer *leaf);
2113int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 2128int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2114 struct btrfs_root *root, 2129 struct btrfs_root *root,
2115 u64 objectid, u64 offset, u64 bytenr); 2130 u64 objectid, u64 offset, u64 bytenr);
2116int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
2117struct btrfs_block_group_cache *btrfs_lookup_block_group( 2131struct btrfs_block_group_cache *btrfs_lookup_block_group(
2118 struct btrfs_fs_info *info, 2132 struct btrfs_fs_info *info,
2119 u64 bytenr); 2133 u64 bytenr);
@@ -2290,10 +2304,12 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
2290 struct btrfs_root *root, struct extent_buffer *parent, 2304 struct btrfs_root *root, struct extent_buffer *parent,
2291 int start_slot, int cache_only, u64 *last_ret, 2305 int start_slot, int cache_only, u64 *last_ret,
2292 struct btrfs_key *progress); 2306 struct btrfs_key *progress);
2293void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p); 2307void btrfs_release_path(struct btrfs_path *p);
2294struct btrfs_path *btrfs_alloc_path(void); 2308struct btrfs_path *btrfs_alloc_path(void);
2295void btrfs_free_path(struct btrfs_path *p); 2309void btrfs_free_path(struct btrfs_path *p);
2296void btrfs_set_path_blocking(struct btrfs_path *p); 2310void btrfs_set_path_blocking(struct btrfs_path *p);
2311void btrfs_clear_path_blocking(struct btrfs_path *p,
2312 struct extent_buffer *held);
2297void btrfs_unlock_up_safe(struct btrfs_path *p, int level); 2313void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
2298 2314
2299int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2315int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -2305,13 +2321,12 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
2305 return btrfs_del_items(trans, root, path, path->slots[0], 1); 2321 return btrfs_del_items(trans, root, path, path->slots[0], 1);
2306} 2322}
2307 2323
2324int setup_items_for_insert(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, struct btrfs_path *path,
+				struct btrfs_key *cpu_key, u32 *data_size,
+				u32 total_data, u32 total_size, int nr);
 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
 			*root, struct btrfs_key *key, void *data, u32 data_size);
-int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
-				struct btrfs_path *path,
-				struct btrfs_key *cpu_key, u32 *data_size,
-				int nr);
 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
@@ -2357,8 +2372,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
 			*item);
 int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
			 btrfs_root_item *item, struct btrfs_key *key);
-int btrfs_search_root(struct btrfs_root *root, u64 search_start,
-			u64 *found_objectid);
 int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
 int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
 int btrfs_set_root_node(struct btrfs_root_item *item,
@@ -2368,7 +2381,7 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
 /* dir-item.c */
 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, const char *name,
-			  int name_len, u64 dir,
+			  int name_len, struct inode *dir,
			  struct btrfs_key *location, u8 type, u64 index);
 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
						struct btrfs_root *root,
@@ -2413,12 +2426,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 offset);
 int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);
 
-/* inode-map.c */
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
-				struct btrfs_root *fs_root,
-				u64 dirid, u64 *objectid);
-int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid);
-
 /* inode-item.c */
 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
@@ -2463,8 +2470,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_ordered_sum *sums);
 int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
			struct bio *bio, u64 file_start, int contig);
-int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode,
-			  u64 start, unsigned long len);
 struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
@@ -2472,8 +2477,8 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
 int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, struct btrfs_path *path,
			u64 isize);
-int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start,
-				u64 end, struct list_head *list);
+int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+				struct list_head *list, int search_commit);
 /* inode.c */
 
 /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
@@ -2502,8 +2507,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
				u32 min_type);
 
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
-int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
-				   int sync);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
				struct extent_state **cached_state);
 int btrfs_writepages(struct address_space *mapping,
@@ -2520,7 +2523,6 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
-void btrfs_put_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
 void btrfs_dirty_inode(struct inode *inode);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
@@ -2531,10 +2533,8 @@ void btrfs_destroy_cachep(void);
 long btrfs_ioctl_trans_end(struct file *file);
 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *was_new);
-int btrfs_commit_write(struct file *file, struct page *page,
-			unsigned from, unsigned to);
 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
-				    size_t page_offset, u64 start, u64 end,
+				    size_t pg_offset, u64 start, u64 end,
				    int create);
 int btrfs_update_inode(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
@@ -2566,12 +2566,16 @@ extern const struct dentry_operations btrfs_dentry_operations;
 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 void btrfs_update_iflags(struct inode *inode);
 void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
-
+int btrfs_defrag_file(struct inode *inode, struct file *file,
+			struct btrfs_ioctl_defrag_range_args *range,
+			u64 newer_than, unsigned long max_pages);
 /* file.c */
+int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
+			   struct inode *inode);
+int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
 int btrfs_sync_file(struct file *file, int datasync);
 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
				int skip_pinned);
-int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
 extern const struct file_operations btrfs_file_operations;
 int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
			u64 start, u64 end, u64 *hint_byte, int drop_cache);
@@ -2591,10 +2595,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 /* sysfs.c */
 int btrfs_init_sysfs(void);
 void btrfs_exit_sysfs(void);
-int btrfs_sysfs_add_super(struct btrfs_fs_info *fs);
-int btrfs_sysfs_add_root(struct btrfs_root *root);
-void btrfs_sysfs_del_root(struct btrfs_root *root);
-void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
 
 /* xattr.c */
 ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
@@ -2637,4 +2637,18 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
				u64 *bytes_to_reserve);
 void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending);
+
+/* scrub.c */
+int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
+			struct btrfs_scrub_progress *progress, int readonly);
+int btrfs_scrub_pause(struct btrfs_root *root);
+int btrfs_scrub_pause_super(struct btrfs_root *root);
+int btrfs_scrub_continue(struct btrfs_root *root);
+int btrfs_scrub_continue_super(struct btrfs_root *root);
+int btrfs_scrub_cancel(struct btrfs_root *root);
+int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev);
+int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
+int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+			 struct btrfs_scrub_progress *progress);
+
 #endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
new file mode 100644
index 000000000000..01e29503a54b
--- /dev/null
+++ b/fs/btrfs/delayed-inode.c
@@ -0,0 +1,1695 @@
1/*
2 * Copyright (C) 2011 Fujitsu. All rights reserved.
3 * Written by Miao Xie <miaox@cn.fujitsu.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public
7 * License v2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 021110-1307, USA.
18 */
19
20#include <linux/slab.h>
21#include "delayed-inode.h"
22#include "disk-io.h"
23#include "transaction.h"
24
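/*
 * Thresholds used by btrfs_balance_delayed_items() below: once the number
 * of delayed items climbs past BTRFS_DELAYED_BACKGROUND, prepared nodes are
 * flushed by the async workers; once it climbs past BTRFS_DELAYED_WRITEBACK,
 * the caller additionally waits (up to one second) for the count to fall
 * back below the background threshold.
 */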
25#define BTRFS_DELAYED_WRITEBACK 400
26#define BTRFS_DELAYED_BACKGROUND 100
27
28static struct kmem_cache *delayed_node_cache;
29
30int __init btrfs_delayed_inode_init(void)
31{
32 delayed_node_cache = kmem_cache_create("delayed_node",
33 sizeof(struct btrfs_delayed_node),
34 0,
35 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
36 NULL);
37 if (!delayed_node_cache)
38 return -ENOMEM;
39 return 0;
40}
41
42void btrfs_delayed_inode_exit(void)
43{
44 if (delayed_node_cache)
45 kmem_cache_destroy(delayed_node_cache);
46}
47
48static inline void btrfs_init_delayed_node(
49 struct btrfs_delayed_node *delayed_node,
50 struct btrfs_root *root, u64 inode_id)
51{
52 delayed_node->root = root;
53 delayed_node->inode_id = inode_id;
54 atomic_set(&delayed_node->refs, 0);
55 delayed_node->count = 0;
56 delayed_node->in_list = 0;
57 delayed_node->inode_dirty = 0;
58 delayed_node->ins_root = RB_ROOT;
59 delayed_node->del_root = RB_ROOT;
60 mutex_init(&delayed_node->mutex);
61 delayed_node->index_cnt = 0;
62 INIT_LIST_HEAD(&delayed_node->n_list);
63 INIT_LIST_HEAD(&delayed_node->p_list);
64 delayed_node->bytes_reserved = 0;
65}
66
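/*
 * Two delayed items are "continuous" when they are dir index items of the
 * same inode whose index offsets differ by exactly one; such items end up
 * next to each other in the leaf and can therefore be batched.
 */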
67static inline int btrfs_is_continuous_delayed_item(
68 struct btrfs_delayed_item *item1,
69 struct btrfs_delayed_item *item2)
70{
71 if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
72 item1->key.objectid == item2->key.objectid &&
73 item1->key.type == item2->key.type &&
74 item1->key.offset + 1 == item2->key.offset)
75 return 1;
76 return 0;
77}
78
79static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
80 struct btrfs_root *root)
81{
82 return root->fs_info->delayed_root;
83}
84
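/*
 * Return the delayed node of an inode, creating it and caching it in the
 * btrfs inode if it does not exist yet.  The caller always gets its own
 * reference, to be dropped with btrfs_release_delayed_node(); a second
 * reference is held by the cached btrfs_inode->delayed_node pointer.
 */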
85static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
86 struct inode *inode)
87{
88 struct btrfs_delayed_node *node;
89 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
90 struct btrfs_root *root = btrfs_inode->root;
91 u64 ino = btrfs_ino(inode);
92 int ret;
93
94again:
95 node = ACCESS_ONCE(btrfs_inode->delayed_node);
96 if (node) {
97 atomic_inc(&node->refs); /* can be accessed */
98 return node;
99 }
100
101 spin_lock(&root->inode_lock);
102 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
103 if (node) {
104 if (btrfs_inode->delayed_node) {
105 spin_unlock(&root->inode_lock);
106 goto again;
107 }
108 btrfs_inode->delayed_node = node;
109 atomic_inc(&node->refs); /* can be accessed */
110 atomic_inc(&node->refs); /* cached in the inode */
111 spin_unlock(&root->inode_lock);
112 return node;
113 }
114 spin_unlock(&root->inode_lock);
115
116 node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
117 if (!node)
118 return ERR_PTR(-ENOMEM);
119 btrfs_init_delayed_node(node, root, ino);
120
121 atomic_inc(&node->refs); /* cached in the btrfs inode */
122 atomic_inc(&node->refs); /* can be accessed */
123
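	/*
	 * Preload the radix tree so the insertion below cannot fail on an
	 * allocation while we are holding root->inode_lock.
	 */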
124 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
125 if (ret) {
126 kmem_cache_free(delayed_node_cache, node);
127 return ERR_PTR(ret);
128 }
129
130 spin_lock(&root->inode_lock);
131 ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
132 if (ret == -EEXIST) {
133 kmem_cache_free(delayed_node_cache, node);
134 spin_unlock(&root->inode_lock);
135 radix_tree_preload_end();
136 goto again;
137 }
138 btrfs_inode->delayed_node = node;
139 spin_unlock(&root->inode_lock);
140 radix_tree_preload_end();
141
142 return node;
143}
144
145/*
146 * Call it when holding delayed_node->mutex
147 *
148 * If mod = 1, add this node into the prepared list.
149 */
150static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
151 struct btrfs_delayed_node *node,
152 int mod)
153{
154 spin_lock(&root->lock);
155 if (node->in_list) {
156 if (!list_empty(&node->p_list))
157 list_move_tail(&node->p_list, &root->prepare_list);
158 else if (mod)
159 list_add_tail(&node->p_list, &root->prepare_list);
160 } else {
161 list_add_tail(&node->n_list, &root->node_list);
162 list_add_tail(&node->p_list, &root->prepare_list);
163 atomic_inc(&node->refs); /* inserted into list */
164 root->nodes++;
165 node->in_list = 1;
166 }
167 spin_unlock(&root->lock);
168}
169
170/* Call it when holding delayed_node->mutex */
171static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
172 struct btrfs_delayed_node *node)
173{
174 spin_lock(&root->lock);
175 if (node->in_list) {
176 root->nodes--;
177 atomic_dec(&node->refs); /* not in the list */
178 list_del_init(&node->n_list);
179 if (!list_empty(&node->p_list))
180 list_del_init(&node->p_list);
181 node->in_list = 0;
182 }
183 spin_unlock(&root->lock);
184}
185
186struct btrfs_delayed_node *btrfs_first_delayed_node(
187 struct btrfs_delayed_root *delayed_root)
188{
189 struct list_head *p;
190 struct btrfs_delayed_node *node = NULL;
191
192 spin_lock(&delayed_root->lock);
193 if (list_empty(&delayed_root->node_list))
194 goto out;
195
196 p = delayed_root->node_list.next;
197 node = list_entry(p, struct btrfs_delayed_node, n_list);
198 atomic_inc(&node->refs);
199out:
200 spin_unlock(&delayed_root->lock);
201
202 return node;
203}
204
205struct btrfs_delayed_node *btrfs_next_delayed_node(
206 struct btrfs_delayed_node *node)
207{
208 struct btrfs_delayed_root *delayed_root;
209 struct list_head *p;
210 struct btrfs_delayed_node *next = NULL;
211
212 delayed_root = node->root->fs_info->delayed_root;
213 spin_lock(&delayed_root->lock);
214 if (!node->in_list) { /* not in the list */
215 if (list_empty(&delayed_root->node_list))
216 goto out;
217 p = delayed_root->node_list.next;
218 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
219 goto out;
220 else
221 p = node->n_list.next;
222
223 next = list_entry(p, struct btrfs_delayed_node, n_list);
224 atomic_inc(&next->refs);
225out:
226 spin_unlock(&delayed_root->lock);
227
228 return next;
229}
230
231static void __btrfs_release_delayed_node(
232 struct btrfs_delayed_node *delayed_node,
233 int mod)
234{
235 struct btrfs_delayed_root *delayed_root;
236
237 if (!delayed_node)
238 return;
239
240 delayed_root = delayed_node->root->fs_info->delayed_root;
241
242 mutex_lock(&delayed_node->mutex);
243 if (delayed_node->count)
244 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
245 else
246 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
247 mutex_unlock(&delayed_node->mutex);
248
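	/*
	 * If this was the last reference, re-check the count under
	 * root->inode_lock before freeing: btrfs_get_or_create_delayed_node()
	 * may have taken a new reference via the radix tree in the meantime.
	 */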
249 if (atomic_dec_and_test(&delayed_node->refs)) {
250 struct btrfs_root *root = delayed_node->root;
251 spin_lock(&root->inode_lock);
252 if (atomic_read(&delayed_node->refs) == 0) {
253 radix_tree_delete(&root->delayed_nodes_tree,
254 delayed_node->inode_id);
255 kmem_cache_free(delayed_node_cache, delayed_node);
256 }
257 spin_unlock(&root->inode_lock);
258 }
259}
260
261static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
262{
263 __btrfs_release_delayed_node(node, 0);
264}
265
266struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
267 struct btrfs_delayed_root *delayed_root)
268{
269 struct list_head *p;
270 struct btrfs_delayed_node *node = NULL;
271
272 spin_lock(&delayed_root->lock);
273 if (list_empty(&delayed_root->prepare_list))
274 goto out;
275
276 p = delayed_root->prepare_list.next;
277 list_del_init(p);
278 node = list_entry(p, struct btrfs_delayed_node, p_list);
279 atomic_inc(&node->refs);
280out:
281 spin_unlock(&delayed_root->lock);
282
283 return node;
284}
285
286static inline void btrfs_release_prepared_delayed_node(
287 struct btrfs_delayed_node *node)
288{
289 __btrfs_release_delayed_node(node, 1);
290}
291
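/*
 * Allocate a delayed item with room for data_len bytes of item data, which
 * is stored in the flexible data[] array right behind the structure.
 */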
292struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
293{
294 struct btrfs_delayed_item *item;
295 item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
296 if (item) {
297 item->data_len = data_len;
298 item->ins_or_del = 0;
299 item->bytes_reserved = 0;
300 item->block_rsv = NULL;
301 item->delayed_node = NULL;
302 atomic_set(&item->refs, 1);
303 }
304 return item;
305}
306
307/*
308 * __btrfs_lookup_delayed_item - look up the delayed item by key
309 * @delayed_node: pointer to the delayed node
310 * @key: the key to look up
311 * @prev: used to store the prev item if the right item isn't found
312 * @next: used to store the next item if the right item isn't found
313 *
314 * Note: if we don't find the right item, we will return the prev item and
315 * the next item.
316 */
317static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
318 struct rb_root *root,
319 struct btrfs_key *key,
320 struct btrfs_delayed_item **prev,
321 struct btrfs_delayed_item **next)
322{
323 struct rb_node *node, *prev_node = NULL;
324 struct btrfs_delayed_item *delayed_item = NULL;
325 int ret = 0;
326
327 node = root->rb_node;
328
329 while (node) {
330 delayed_item = rb_entry(node, struct btrfs_delayed_item,
331 rb_node);
332 prev_node = node;
333 ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
334 if (ret < 0)
335 node = node->rb_right;
336 else if (ret > 0)
337 node = node->rb_left;
338 else
339 return delayed_item;
340 }
341
342 if (prev) {
343 if (!prev_node)
344 *prev = NULL;
345 else if (ret < 0)
346 *prev = delayed_item;
347 else if ((node = rb_prev(prev_node)) != NULL) {
348 *prev = rb_entry(node, struct btrfs_delayed_item,
349 rb_node);
350 } else
351 *prev = NULL;
352 }
353
354 if (next) {
355 if (!prev_node)
356 *next = NULL;
357 else if (ret > 0)
358 *next = delayed_item;
359 else if ((node = rb_next(prev_node)) != NULL) {
360 *next = rb_entry(node, struct btrfs_delayed_item,
361 rb_node);
362 } else
363 *next = NULL;
364 }
365 return NULL;
366}
367
368struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
369 struct btrfs_delayed_node *delayed_node,
370 struct btrfs_key *key)
371{
372 struct btrfs_delayed_item *item;
373
374 item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
375 NULL, NULL);
376 return item;
377}
378
379struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
380 struct btrfs_delayed_node *delayed_node,
381 struct btrfs_key *key)
382{
383 struct btrfs_delayed_item *item;
384
385 item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
386 NULL, NULL);
387 return item;
388}
389
390struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
391 struct btrfs_delayed_node *delayed_node,
392 struct btrfs_key *key)
393{
394 struct btrfs_delayed_item *item, *next;
395
396 item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
397 NULL, &next);
398 if (!item)
399 item = next;
400
401 return item;
402}
403
404struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
405 struct btrfs_delayed_node *delayed_node,
406 struct btrfs_key *key)
407{
408 struct btrfs_delayed_item *item, *next;
409
410 item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
411 NULL, &next);
412 if (!item)
413 item = next;
414
415 return item;
416}
417
418static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
419 struct btrfs_delayed_item *ins,
420 int action)
421{
422 struct rb_node **p, *node;
423 struct rb_node *parent_node = NULL;
424 struct rb_root *root;
425 struct btrfs_delayed_item *item;
426 int cmp;
427
428 if (action == BTRFS_DELAYED_INSERTION_ITEM)
429 root = &delayed_node->ins_root;
430 else if (action == BTRFS_DELAYED_DELETION_ITEM)
431 root = &delayed_node->del_root;
432 else
433 BUG();
434 p = &root->rb_node;
435 node = &ins->rb_node;
436
437 while (*p) {
438 parent_node = *p;
439 item = rb_entry(parent_node, struct btrfs_delayed_item,
440 rb_node);
441
442 cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
443 if (cmp < 0)
444 p = &(*p)->rb_right;
445 else if (cmp > 0)
446 p = &(*p)->rb_left;
447 else
448 return -EEXIST;
449 }
450
451 rb_link_node(node, parent_node, p);
452 rb_insert_color(node, root);
453 ins->delayed_node = delayed_node;
454 ins->ins_or_del = action;
455
456 if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
457 action == BTRFS_DELAYED_INSERTION_ITEM &&
458 ins->key.offset >= delayed_node->index_cnt)
459 delayed_node->index_cnt = ins->key.offset + 1;
460
461 delayed_node->count++;
462 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
463 return 0;
464}
465
466static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
467 struct btrfs_delayed_item *item)
468{
469 return __btrfs_add_delayed_item(node, item,
470 BTRFS_DELAYED_INSERTION_ITEM);
471}
472
473static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
474 struct btrfs_delayed_item *item)
475{
476 return __btrfs_add_delayed_item(node, item,
477 BTRFS_DELAYED_DELETION_ITEM);
478}
479
480static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
481{
482 struct rb_root *root;
483 struct btrfs_delayed_root *delayed_root;
484
485 delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
486
487 BUG_ON(!delayed_root);
488 BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
489 delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
490
491 if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
492 root = &delayed_item->delayed_node->ins_root;
493 else
494 root = &delayed_item->delayed_node->del_root;
495
496 rb_erase(&delayed_item->rb_node, root);
497 delayed_item->delayed_node->count--;
498 atomic_dec(&delayed_root->items);
499 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
500 waitqueue_active(&delayed_root->wait))
501 wake_up(&delayed_root->wait);
502}
503
504static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
505{
506 if (item) {
507 __btrfs_remove_delayed_item(item);
508 if (atomic_dec_and_test(&item->refs))
509 kfree(item);
510 }
511}
512
513struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
514 struct btrfs_delayed_node *delayed_node)
515{
516 struct rb_node *p;
517 struct btrfs_delayed_item *item = NULL;
518
519 p = rb_first(&delayed_node->ins_root);
520 if (p)
521 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
522
523 return item;
524}
525
526struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
527 struct btrfs_delayed_node *delayed_node)
528{
529 struct rb_node *p;
530 struct btrfs_delayed_item *item = NULL;
531
532 p = rb_first(&delayed_node->del_root);
533 if (p)
534 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
535
536 return item;
537}
538
539struct btrfs_delayed_item *__btrfs_next_delayed_item(
540 struct btrfs_delayed_item *item)
541{
542 struct rb_node *p;
543 struct btrfs_delayed_item *next = NULL;
544
545 p = rb_next(&item->rb_node);
546 if (p)
547 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
548
549 return next;
550}
551
552static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
553 struct inode *inode)
554{
555 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
556 struct btrfs_delayed_node *delayed_node;
557
558 delayed_node = btrfs_inode->delayed_node;
559 if (delayed_node)
560 atomic_inc(&delayed_node->refs);
561
562 return delayed_node;
563}
564
565static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
566 u64 root_id)
567{
568 struct btrfs_key root_key;
569
570 if (root->objectid == root_id)
571 return root;
572
573 root_key.objectid = root_id;
574 root_key.type = BTRFS_ROOT_ITEM_KEY;
575 root_key.offset = (u64)-1;
576 return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
577}
578
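/*
 * Reserve the metadata space for one delayed item by migrating it from the
 * transaction's block reservation into the global block reservation.  The
 * space is handed back via btrfs_delayed_item_release_metadata() once the
 * item has been written out or dropped.
 */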
579static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
580 struct btrfs_root *root,
581 struct btrfs_delayed_item *item)
582{
583 struct btrfs_block_rsv *src_rsv;
584 struct btrfs_block_rsv *dst_rsv;
585 u64 num_bytes;
586 int ret;
587
588 if (!trans->bytes_reserved)
589 return 0;
590
591 src_rsv = trans->block_rsv;
592 dst_rsv = &root->fs_info->global_block_rsv;
593
594 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
595 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
596 if (!ret) {
597 item->bytes_reserved = num_bytes;
598 item->block_rsv = dst_rsv;
599 }
600
601 return ret;
602}
603
604static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
605 struct btrfs_delayed_item *item)
606{
607 if (!item->bytes_reserved)
608 return;
609
610 btrfs_block_rsv_release(root, item->block_rsv,
611 item->bytes_reserved);
612}
613
614static int btrfs_delayed_inode_reserve_metadata(
615 struct btrfs_trans_handle *trans,
616 struct btrfs_root *root,
617 struct btrfs_delayed_node *node)
618{
619 struct btrfs_block_rsv *src_rsv;
620 struct btrfs_block_rsv *dst_rsv;
621 u64 num_bytes;
622 int ret;
623
624 if (!trans->bytes_reserved)
625 return 0;
626
627 src_rsv = trans->block_rsv;
628 dst_rsv = &root->fs_info->global_block_rsv;
629
630 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
631 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
632 if (!ret)
633 node->bytes_reserved = num_bytes;
634
635 return ret;
636}
637
638static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
639 struct btrfs_delayed_node *node)
640{
641 struct btrfs_block_rsv *rsv;
642
643 if (!node->bytes_reserved)
644 return;
645
646 rsv = &root->fs_info->global_block_rsv;
647 btrfs_block_rsv_release(root, rsv,
648 node->bytes_reserved);
649 node->bytes_reserved = 0;
650}
651
652/*
653 * This helper will insert some continuous items into the same leaf according
654 * to the free space of the leaf.
655 */
656static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
657 struct btrfs_root *root,
658 struct btrfs_path *path,
659 struct btrfs_delayed_item *item)
660{
661 struct btrfs_delayed_item *curr, *next;
662 int free_space;
663 int total_data_size = 0, total_size = 0;
664 struct extent_buffer *leaf;
665 char *data_ptr;
666 struct btrfs_key *keys;
667 u32 *data_size;
668 struct list_head head;
669 int slot;
670 int nitems = 0;
671 int i;
672 int ret = 0;
673
674 BUG_ON(!path->nodes[0]);
675
676 leaf = path->nodes[0];
677 free_space = btrfs_leaf_free_space(root, leaf);
678 INIT_LIST_HEAD(&head);
679
680 next = item;
681
682 /*
683 * count the number of the continuous items that we can insert in batch
684 */
685 while (total_size + next->data_len + sizeof(struct btrfs_item) <=
686 free_space) {
687 total_data_size += next->data_len;
688 total_size += next->data_len + sizeof(struct btrfs_item);
689 list_add_tail(&next->tree_list, &head);
690 nitems++;
691
692 curr = next;
693 next = __btrfs_next_delayed_item(curr);
694 if (!next)
695 break;
696
697 if (!btrfs_is_continuous_delayed_item(curr, next))
698 break;
699 }
700
701 if (!nitems) {
702 ret = 0;
703 goto out;
704 }
705
706 /*
707 * we need to allocate some memory space, but it might cause the task
708 * to sleep, so we set all locked nodes in the path to blocking locks
709 * first.
710 */
711 btrfs_set_path_blocking(path);
712
713 keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
714 if (!keys) {
715 ret = -ENOMEM;
716 goto out;
717 }
718
719 data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
720 if (!data_size) {
721 ret = -ENOMEM;
722 goto error;
723 }
724
725 /* get keys of all the delayed items */
726 i = 0;
727 list_for_each_entry(next, &head, tree_list) {
728 keys[i] = next->key;
729 data_size[i] = next->data_len;
730 i++;
731 }
732
733 /* reset all the locked nodes in the path to spinning locks. */
734 btrfs_clear_path_blocking(path, NULL);
735
736 /* insert the keys of the items */
737 ret = setup_items_for_insert(trans, root, path, keys, data_size,
738 total_data_size, total_size, nitems);
739 if (ret)
740 goto error;
741
742 /* insert the dir index items */
743 slot = path->slots[0];
744 list_for_each_entry_safe(curr, next, &head, tree_list) {
745 data_ptr = btrfs_item_ptr(leaf, slot, char);
746 write_extent_buffer(leaf, &curr->data,
747 (unsigned long)data_ptr,
748 curr->data_len);
749 slot++;
750
751 btrfs_delayed_item_release_metadata(root, curr);
752
753 list_del(&curr->tree_list);
754 btrfs_release_delayed_item(curr);
755 }
756
757error:
758 kfree(data_size);
759 kfree(keys);
760out:
761 return ret;
762}
763
764/*
765 * This helper can just do simple insertions that needn't extend the item
766 * for new data, such as directory name index insertion and inode insertion.
767 */
768static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
769 struct btrfs_root *root,
770 struct btrfs_path *path,
771 struct btrfs_delayed_item *delayed_item)
772{
773 struct extent_buffer *leaf;
774 struct btrfs_item *item;
775 char *ptr;
776 int ret;
777
778 ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
779 delayed_item->data_len);
780 if (ret < 0 && ret != -EEXIST)
781 return ret;
782
783 leaf = path->nodes[0];
784
785 item = btrfs_item_nr(leaf, path->slots[0]);
786 ptr = btrfs_item_ptr(leaf, path->slots[0], char);
787
788 write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
789 delayed_item->data_len);
790 btrfs_mark_buffer_dirty(leaf);
791
792 btrfs_delayed_item_release_metadata(root, delayed_item);
793 return 0;
794}
795
796/*
797 * we insert an item first, then if there are some continuous items, we try
798 * to insert those items into the same leaf.
799 */
800static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
801 struct btrfs_path *path,
802 struct btrfs_root *root,
803 struct btrfs_delayed_node *node)
804{
805 struct btrfs_delayed_item *curr, *prev;
806 int ret = 0;
807
808do_again:
809 mutex_lock(&node->mutex);
810 curr = __btrfs_first_delayed_insertion_item(node);
811 if (!curr)
812 goto insert_end;
813
814 ret = btrfs_insert_delayed_item(trans, root, path, curr);
815 if (ret < 0) {
816 btrfs_release_path(path);
817 goto insert_end;
818 }
819
820 prev = curr;
821 curr = __btrfs_next_delayed_item(prev);
822 if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
823 /* insert the continuous items into the same leaf */
824 path->slots[0]++;
825 btrfs_batch_insert_items(trans, root, path, curr);
826 }
827 btrfs_release_delayed_item(prev);
828 btrfs_mark_buffer_dirty(path->nodes[0]);
829
830 btrfs_release_path(path);
831 mutex_unlock(&node->mutex);
832 goto do_again;
833
834insert_end:
835 mutex_unlock(&node->mutex);
836 return ret;
837}
838
839static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
840 struct btrfs_root *root,
841 struct btrfs_path *path,
842 struct btrfs_delayed_item *item)
843{
844 struct btrfs_delayed_item *curr, *next;
845 struct extent_buffer *leaf;
846 struct btrfs_key key;
847 struct list_head head;
848 int nitems, i, last_item;
849 int ret = 0;
850
851 BUG_ON(!path->nodes[0]);
852
853 leaf = path->nodes[0];
854
855 i = path->slots[0];
856 last_item = btrfs_header_nritems(leaf) - 1;
857 if (i > last_item)
858 return -ENOENT; /* FIXME: Is errno suitable? */
859
860 next = item;
861 INIT_LIST_HEAD(&head);
862 btrfs_item_key_to_cpu(leaf, &key, i);
863 nitems = 0;
864 /*
865 * count the number of the dir index items that we can delete in batch
866 */
867 while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
868 list_add_tail(&next->tree_list, &head);
869 nitems++;
870
871 curr = next;
872 next = __btrfs_next_delayed_item(curr);
873 if (!next)
874 break;
875
876 if (!btrfs_is_continuous_delayed_item(curr, next))
877 break;
878
879 i++;
880 if (i > last_item)
881 break;
882 btrfs_item_key_to_cpu(leaf, &key, i);
883 }
884
885 if (!nitems)
886 return 0;
887
888 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
889 if (ret)
890 goto out;
891
892 list_for_each_entry_safe(curr, next, &head, tree_list) {
893 btrfs_delayed_item_release_metadata(root, curr);
894 list_del(&curr->tree_list);
895 btrfs_release_delayed_item(curr);
896 }
897
898out:
899 return ret;
900}
901
902static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
903 struct btrfs_path *path,
904 struct btrfs_root *root,
905 struct btrfs_delayed_node *node)
906{
907 struct btrfs_delayed_item *curr, *prev;
908 int ret = 0;
909
910do_again:
911 mutex_lock(&node->mutex);
912 curr = __btrfs_first_delayed_deletion_item(node);
913 if (!curr)
914 goto delete_fail;
915
916 ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
917 if (ret < 0)
918 goto delete_fail;
919 else if (ret > 0) {
920 /*
921 * can't find the item which this delayed item points to, so the
922 * delayed item is invalid, just drop it.
923 */
924 prev = curr;
925 curr = __btrfs_next_delayed_item(prev);
926 btrfs_release_delayed_item(prev);
927 ret = 0;
928 btrfs_release_path(path);
929 if (curr)
930 goto do_again;
931 else
932 goto delete_fail;
933 }
934
935 btrfs_batch_delete_items(trans, root, path, curr);
936 btrfs_release_path(path);
937 mutex_unlock(&node->mutex);
938 goto do_again;
939
940delete_fail:
941 btrfs_release_path(path);
942 mutex_unlock(&node->mutex);
943 return ret;
944}
945
946static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
947{
948 struct btrfs_delayed_root *delayed_root;
949
950 if (delayed_node && delayed_node->inode_dirty) {
951 BUG_ON(!delayed_node->root);
952 delayed_node->inode_dirty = 0;
953 delayed_node->count--;
954
955 delayed_root = delayed_node->root->fs_info->delayed_root;
956 atomic_dec(&delayed_root->items);
957 if (atomic_read(&delayed_root->items) <
958 BTRFS_DELAYED_BACKGROUND &&
959 waitqueue_active(&delayed_root->wait))
960 wake_up(&delayed_root->wait);
961 }
962}
963
964static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
965 struct btrfs_root *root,
966 struct btrfs_path *path,
967 struct btrfs_delayed_node *node)
968{
969 struct btrfs_key key;
970 struct btrfs_inode_item *inode_item;
971 struct extent_buffer *leaf;
972 int ret;
973
974 mutex_lock(&node->mutex);
975 if (!node->inode_dirty) {
976 mutex_unlock(&node->mutex);
977 return 0;
978 }
979
980 key.objectid = node->inode_id;
981 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
982 key.offset = 0;
983 ret = btrfs_lookup_inode(trans, root, path, &key, 1);
984 if (ret > 0) {
985 btrfs_release_path(path);
986 mutex_unlock(&node->mutex);
987 return -ENOENT;
988 } else if (ret < 0) {
989 mutex_unlock(&node->mutex);
990 return ret;
991 }
992
993 btrfs_unlock_up_safe(path, 1);
994 leaf = path->nodes[0];
995 inode_item = btrfs_item_ptr(leaf, path->slots[0],
996 struct btrfs_inode_item);
997 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
998 sizeof(struct btrfs_inode_item));
999 btrfs_mark_buffer_dirty(leaf);
1000 btrfs_release_path(path);
1001
1002 btrfs_delayed_inode_release_metadata(root, node);
1003 btrfs_release_delayed_inode(node);
1004 mutex_unlock(&node->mutex);
1005
1006 return 0;
1007}
1008
1009/* Called when committing the transaction. */
1010int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1011 struct btrfs_root *root)
1012{
1013 struct btrfs_delayed_root *delayed_root;
1014 struct btrfs_delayed_node *curr_node, *prev_node;
1015 struct btrfs_path *path;
1016 int ret = 0;
1017
1018 path = btrfs_alloc_path();
1019 if (!path)
1020 return -ENOMEM;
1021 path->leave_spinning = 1;
1022
1023 delayed_root = btrfs_get_delayed_root(root);
1024
1025 curr_node = btrfs_first_delayed_node(delayed_root);
1026 while (curr_node) {
1027 root = curr_node->root;
1028 ret = btrfs_insert_delayed_items(trans, path, root,
1029 curr_node);
1030 if (!ret)
1031 ret = btrfs_delete_delayed_items(trans, path, root,
1032 curr_node);
1033 if (!ret)
1034 ret = btrfs_update_delayed_inode(trans, root, path,
1035 curr_node);
1036 if (ret) {
1037 btrfs_release_delayed_node(curr_node);
1038 break;
1039 }
1040
1041 prev_node = curr_node;
1042 curr_node = btrfs_next_delayed_node(curr_node);
1043 btrfs_release_delayed_node(prev_node);
1044 }
1045
1046 btrfs_free_path(path);
1047 return ret;
1048}
1049
1050static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1051 struct btrfs_delayed_node *node)
1052{
1053 struct btrfs_path *path;
1054 int ret;
1055
1056 path = btrfs_alloc_path();
1057 if (!path)
1058 return -ENOMEM;
1059 path->leave_spinning = 1;
1060
1061 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1062 if (!ret)
1063 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1064 if (!ret)
1065 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1066 btrfs_free_path(path);
1067
1068 return ret;
1069}
1070
1071int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1072 struct inode *inode)
1073{
1074 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1075 int ret;
1076
1077 if (!delayed_node)
1078 return 0;
1079
1080 mutex_lock(&delayed_node->mutex);
1081 if (!delayed_node->count) {
1082 mutex_unlock(&delayed_node->mutex);
1083 btrfs_release_delayed_node(delayed_node);
1084 return 0;
1085 }
1086 mutex_unlock(&delayed_node->mutex);
1087
1088 ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
1089 btrfs_release_delayed_node(delayed_node);
1090 return ret;
1091}
1092
1093void btrfs_remove_delayed_node(struct inode *inode)
1094{
1095 struct btrfs_delayed_node *delayed_node;
1096
1097 delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
1098 if (!delayed_node)
1099 return;
1100
1101 BTRFS_I(inode)->delayed_node = NULL;
1102 btrfs_release_delayed_node(delayed_node);
1103}
1104
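/*
 * Work item handed to the delayed_workers thread pool by
 * btrfs_wq_run_delayed_node(); each one flushes a single prepared
 * delayed node and requeues itself if new items showed up meanwhile.
 */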
1105struct btrfs_async_delayed_node {
1106 struct btrfs_root *root;
1107 struct btrfs_delayed_node *delayed_node;
1108 struct btrfs_work work;
1109};
1110
1111static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1112{
1113 struct btrfs_async_delayed_node *async_node;
1114 struct btrfs_trans_handle *trans;
1115 struct btrfs_path *path;
1116 struct btrfs_delayed_node *delayed_node = NULL;
1117 struct btrfs_root *root;
1118 unsigned long nr = 0;
1119 int need_requeue = 0;
1120 int ret;
1121
1122 async_node = container_of(work, struct btrfs_async_delayed_node, work);
1123
1124 path = btrfs_alloc_path();
1125 if (!path)
1126 goto out;
1127 path->leave_spinning = 1;
1128
1129 delayed_node = async_node->delayed_node;
1130 root = delayed_node->root;
1131
1132 trans = btrfs_join_transaction(root, 0);
1133 if (IS_ERR(trans))
1134 goto free_path;
1135
1136 ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
1137 if (!ret)
1138 ret = btrfs_delete_delayed_items(trans, path, root,
1139 delayed_node);
1140
1141 if (!ret)
1142 btrfs_update_delayed_inode(trans, root, path, delayed_node);
1143
1144 /*
1145 * Maybe new delayed items have been inserted, so we need to requeue
1146 * the work. Besides that, we must dequeue the empty delayed nodes
1147 * to avoid the race between delayed items balance and the worker.
1148 * The race is like this:
1149 *	Task1				Worker thread
1150 *					count == 0, needn't requeue
1151 *					  also needn't insert the
1152 *					  delayed node into prepare
1153 *					  list again.
1154 *	add lots of delayed items
1155 *	queue the delayed node
1156 *	  already in the list,
1157 *	  and not in the prepare
1158 *	  list, it means the delayed
1159 *	  node is being dealt with
1160 *	  by the worker.
1161 *	do delayed items balance
1162 *	  the delayed node is being
1163 *	  dealt with by the worker
1164 *	  now, just wait.
1165 *	the worker goes idle.
1166 * Task1 will sleep until the transaction is committed.
1167 */
1168 mutex_lock(&delayed_node->mutex);
1169 if (delayed_node->count)
1170 need_requeue = 1;
1171 else
1172 btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
1173 delayed_node);
1174 mutex_unlock(&delayed_node->mutex);
1175
1176 nr = trans->blocks_used;
1177
1178 btrfs_end_transaction_dmeta(trans, root);
1179 __btrfs_btree_balance_dirty(root, nr);
1180free_path:
1181 btrfs_free_path(path);
1182out:
1183 if (need_requeue)
1184 btrfs_requeue_work(&async_node->work);
1185 else {
1186 btrfs_release_prepared_delayed_node(delayed_node);
1187 kfree(async_node);
1188 }
1189}
1190
1191static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1192 struct btrfs_root *root, int all)
1193{
1194 struct btrfs_async_delayed_node *async_node;
1195 struct btrfs_delayed_node *curr;
1196 int count = 0;
1197
1198again:
1199 curr = btrfs_first_prepared_delayed_node(delayed_root);
1200 if (!curr)
1201 return 0;
1202
1203 async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
1204 if (!async_node) {
1205 btrfs_release_prepared_delayed_node(curr);
1206 return -ENOMEM;
1207 }
1208
1209 async_node->root = root;
1210 async_node->delayed_node = curr;
1211
1212 async_node->work.func = btrfs_async_run_delayed_node_done;
1213 async_node->work.flags = 0;
1214
1215 btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
1216 count++;
1217
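	/*
	 * With 'all' set, keep queuing until the prepare list is empty;
	 * otherwise hand at most four prepared nodes to the workers per call.
	 */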
1218 if (all || count < 4)
1219 goto again;
1220
1221 return 0;
1222}
1223
1224void btrfs_balance_delayed_items(struct btrfs_root *root)
1225{
1226 struct btrfs_delayed_root *delayed_root;
1227
1228 delayed_root = btrfs_get_delayed_root(root);
1229
1230 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1231 return;
1232
1233 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1234 int ret;
1235 ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
1236 if (ret)
1237 return;
1238
1239 wait_event_interruptible_timeout(
1240 delayed_root->wait,
1241 (atomic_read(&delayed_root->items) <
1242 BTRFS_DELAYED_BACKGROUND),
1243 HZ);
1244 return;
1245 }
1246
1247 btrfs_wq_run_delayed_node(delayed_root, root, 0);
1248}
1249
1250int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1251 struct btrfs_root *root, const char *name,
1252 int name_len, struct inode *dir,
1253 struct btrfs_disk_key *disk_key, u8 type,
1254 u64 index)
1255{
1256 struct btrfs_delayed_node *delayed_node;
1257 struct btrfs_delayed_item *delayed_item;
1258 struct btrfs_dir_item *dir_item;
1259 int ret;
1260
1261 delayed_node = btrfs_get_or_create_delayed_node(dir);
1262 if (IS_ERR(delayed_node))
1263 return PTR_ERR(delayed_node);
1264
1265 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1266 if (!delayed_item) {
1267 ret = -ENOMEM;
1268 goto release_node;
1269 }
1270
1271 ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
1272 /*
1273 * we have reserved enough space when we start a new transaction,
1274 * so a metadata reservation failure is impossible
1275 */
1276 BUG_ON(ret);
1277
1278 delayed_item->key.objectid = btrfs_ino(dir);
1279 btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
1280 delayed_item->key.offset = index;
1281
1282 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1283 dir_item->location = *disk_key;
1284 dir_item->transid = cpu_to_le64(trans->transid);
1285 dir_item->data_len = 0;
1286 dir_item->name_len = cpu_to_le16(name_len);
1287 dir_item->type = type;
1288 memcpy((char *)(dir_item + 1), name, name_len);
1289
1290 mutex_lock(&delayed_node->mutex);
1291 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1292 if (unlikely(ret)) {
1293 printk(KERN_ERR "error adding delayed dir index item (name: %s) into "
1294 "the insertion tree of the delayed node "
1295 "(root id: %llu, inode id: %llu, errno: %d)\n",
1296 name,
1297 (unsigned long long)delayed_node->root->objectid,
1298 (unsigned long long)delayed_node->inode_id,
1299 ret);
1300 BUG();
1301 }
1302 mutex_unlock(&delayed_node->mutex);
1303
1304release_node:
1305 btrfs_release_delayed_node(delayed_node);
1306 return ret;
1307}
1308
1309static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
1310 struct btrfs_delayed_node *node,
1311 struct btrfs_key *key)
1312{
1313 struct btrfs_delayed_item *item;
1314
1315 mutex_lock(&node->mutex);
1316 item = __btrfs_lookup_delayed_insertion_item(node, key);
1317 if (!item) {
1318 mutex_unlock(&node->mutex);
1319 return 1;
1320 }
1321
1322 btrfs_delayed_item_release_metadata(root, item);
1323 btrfs_release_delayed_item(item);
1324 mutex_unlock(&node->mutex);
1325 return 0;
1326}
1327
1328int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1329 struct btrfs_root *root, struct inode *dir,
1330 u64 index)
1331{
1332 struct btrfs_delayed_node *node;
1333 struct btrfs_delayed_item *item;
1334 struct btrfs_key item_key;
1335 int ret;
1336
1337 node = btrfs_get_or_create_delayed_node(dir);
1338 if (IS_ERR(node))
1339 return PTR_ERR(node);
1340
1341 item_key.objectid = btrfs_ino(dir);
1342 btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
1343 item_key.offset = index;
1344
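	/*
	 * If this index only exists as a pending delayed insertion, dropping
	 * that insertion is enough and no deletion item needs to be queued.
	 */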
1345 ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
1346 if (!ret)
1347 goto end;
1348
1349 item = btrfs_alloc_delayed_item(0);
1350 if (!item) {
1351 ret = -ENOMEM;
1352 goto end;
1353 }
1354
1355 item->key = item_key;
1356
1357 ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
1358 /*
1359 * we have reserved enough space when we start a new transaction,
1360 * so a metadata reservation failure is impossible.
1361 */
1362 BUG_ON(ret);
1363
1364 mutex_lock(&node->mutex);
1365 ret = __btrfs_add_delayed_deletion_item(node, item);
1366 if (unlikely(ret)) {
1367 printk(KERN_ERR "error adding delayed dir index item (index: %llu) "
1368 "into the deletion tree of the delayed node "
1369 "(root id: %llu, inode id: %llu, errno: %d)\n",
1370 (unsigned long long)index,
1371 (unsigned long long)node->root->objectid,
1372 (unsigned long long)node->inode_id,
1373 ret);
1374 BUG();
1375 }
1376 mutex_unlock(&node->mutex);
1377end:
1378 btrfs_release_delayed_node(node);
1379 return ret;
1380}
1381
1382int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1383{
1384 struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
1385 int ret = 0;
1386
1387 if (!delayed_node)
1388 return -ENOENT;
1389
1390 /*
1391 * Since we hold the i_mutex of this directory, no new directory index
1392 * can be added into the delayed node and index_cnt cannot be updated
1393 * now, so we needn't lock the delayed node.
1394 */
1395 if (!delayed_node->index_cnt)
1396 return -EINVAL;
1397
1398 BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
1399 return ret;
1400}
1401
1402void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
1403 struct list_head *del_list)
1404{
1405 struct btrfs_delayed_node *delayed_node;
1406 struct btrfs_delayed_item *item;
1407
1408 delayed_node = btrfs_get_delayed_node(inode);
1409 if (!delayed_node)
1410 return;
1411
1412 mutex_lock(&delayed_node->mutex);
1413 item = __btrfs_first_delayed_insertion_item(delayed_node);
1414 while (item) {
1415 atomic_inc(&item->refs);
1416 list_add_tail(&item->readdir_list, ins_list);
1417 item = __btrfs_next_delayed_item(item);
1418 }
1419
1420 item = __btrfs_first_delayed_deletion_item(delayed_node);
1421 while (item) {
1422 atomic_inc(&item->refs);
1423 list_add_tail(&item->readdir_list, del_list);
1424 item = __btrfs_next_delayed_item(item);
1425 }
1426 mutex_unlock(&delayed_node->mutex);
1427 /*
1428 * This delayed node is still cached in the btrfs inode, so refs
1429 * must be > 1 now, and we needn't check whether it is going to be
1430 * freed or not.
1431 *
1432 * Besides that, this function is used to read the dir, and we do not
1433 * insert/delete delayed items during that time. So we also needn't
1434 * requeue or dequeue this delayed node.
1435 */
1436 atomic_dec(&delayed_node->refs);
1437}
1438
1439void btrfs_put_delayed_items(struct list_head *ins_list,
1440 struct list_head *del_list)
1441{
1442 struct btrfs_delayed_item *curr, *next;
1443
1444 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1445 list_del(&curr->readdir_list);
1446 if (atomic_dec_and_test(&curr->refs))
1447 kfree(curr);
1448 }
1449
1450 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1451 list_del(&curr->readdir_list);
1452 if (atomic_dec_and_test(&curr->refs))
1453 kfree(curr);
1454 }
1455}
1456
1457int btrfs_should_delete_dir_index(struct list_head *del_list,
1458 u64 index)
1459{
1460 struct btrfs_delayed_item *curr, *next;
1461 int ret;
1462
1463 if (list_empty(del_list))
1464 return 0;
1465
1466 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1467 if (curr->key.offset > index)
1468 break;
1469
1470 list_del(&curr->readdir_list);
1471 ret = (curr->key.offset == index);
1472
1473 if (atomic_dec_and_test(&curr->refs))
1474 kfree(curr);
1475
1476 if (ret)
1477 return 1;
1478 else
1479 continue;
1480 }
1481 return 0;
1482}
1483
1484/*
1485 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1486 *
1487 */
1488int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
1489 filldir_t filldir,
1490 struct list_head *ins_list)
1491{
1492 struct btrfs_dir_item *di;
1493 struct btrfs_delayed_item *curr, *next;
1494 struct btrfs_key location;
1495 char *name;
1496 int name_len;
1497 int over = 0;
1498 unsigned char d_type;
1499
1500 if (list_empty(ins_list))
1501 return 0;
1502
1503 /*
1504 * The data of the delayed items cannot be changed, so we needn't
1505 * lock them. And since we hold the i_mutex of the directory,
1506 * nobody can delete any directory index now.
1507 */
1508 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1509 list_del(&curr->readdir_list);
1510
1511 if (curr->key.offset < filp->f_pos) {
1512 if (atomic_dec_and_test(&curr->refs))
1513 kfree(curr);
1514 continue;
1515 }
1516
1517 filp->f_pos = curr->key.offset;
1518
1519 di = (struct btrfs_dir_item *)curr->data;
1520 name = (char *)(di + 1);
1521 name_len = le16_to_cpu(di->name_len);
1522
1523 d_type = btrfs_filetype_table[di->type];
1524 btrfs_disk_key_to_cpu(&location, &di->location);
1525
1526 over = filldir(dirent, name, name_len, curr->key.offset,
1527 location.objectid, d_type);
1528
1529 if (atomic_dec_and_test(&curr->refs))
1530 kfree(curr);
1531
1532 if (over)
1533 return 1;
1534 }
1535 return 0;
1536}
1537
1538BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
1539 generation, 64);
1540BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
1541 sequence, 64);
1542BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
1543 transid, 64);
1544BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
1545BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
1546 nbytes, 64);
1547BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
1548 block_group, 64);
1549BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
1550BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
1551BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
1552BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
1553BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
1554BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
1555
1556BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
1557BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
1558
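/*
 * Snapshot the in-memory inode state into a stack btrfs_inode_item; the copy
 * kept in the delayed node is what btrfs_update_delayed_inode() later writes
 * back into the inode item in the tree.
 */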
1559static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1560 struct btrfs_inode_item *inode_item,
1561 struct inode *inode)
1562{
1563 btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
1564 btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
1565 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1566 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1567 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1568 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1569 btrfs_set_stack_inode_generation(inode_item,
1570 BTRFS_I(inode)->generation);
1571 btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
1572 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1573 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1574 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1575 btrfs_set_stack_inode_block_group(inode_item,
1576 BTRFS_I(inode)->block_group);
1577
1578 btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
1579 inode->i_atime.tv_sec);
1580 btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
1581 inode->i_atime.tv_nsec);
1582
1583 btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
1584 inode->i_mtime.tv_sec);
1585 btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
1586 inode->i_mtime.tv_nsec);
1587
1588 btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
1589 inode->i_ctime.tv_sec);
1590 btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
1591 inode->i_ctime.tv_nsec);
1592}
1593
1594int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1595 struct btrfs_root *root, struct inode *inode)
1596{
1597 struct btrfs_delayed_node *delayed_node;
1598 int ret;
1599
1600 delayed_node = btrfs_get_or_create_delayed_node(inode);
1601 if (IS_ERR(delayed_node))
1602 return PTR_ERR(delayed_node);
1603
1604 mutex_lock(&delayed_node->mutex);
1605 if (delayed_node->inode_dirty) {
1606 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1607 goto release_node;
1608 }
1609
1610 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1611 /*
1612 * we must reserve enough space when we start a new transaction,
1613 * so a metadata reservation failure is impossible
1614 */
1615 BUG_ON(ret);
1616
1617 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1618 delayed_node->inode_dirty = 1;
1619 delayed_node->count++;
1620 atomic_inc(&root->fs_info->delayed_root->items);
1621release_node:
1622 mutex_unlock(&delayed_node->mutex);
1623 btrfs_release_delayed_node(delayed_node);
1624 return ret;
1625}
1626
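/*
 * Throw away everything a delayed node still holds (pending insertion and
 * deletion items as well as the dirty inode state) and release their
 * metadata reservations.  Used when evicting an inode or dropping a dead
 * root before the delayed items were ever written out.
 */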
1627static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1628{
1629 struct btrfs_root *root = delayed_node->root;
1630 struct btrfs_delayed_item *curr_item, *prev_item;
1631
1632 mutex_lock(&delayed_node->mutex);
1633 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1634 while (curr_item) {
1635 btrfs_delayed_item_release_metadata(root, curr_item);
1636 prev_item = curr_item;
1637 curr_item = __btrfs_next_delayed_item(prev_item);
1638 btrfs_release_delayed_item(prev_item);
1639 }
1640
1641 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1642 while (curr_item) {
1643 btrfs_delayed_item_release_metadata(root, curr_item);
1644 prev_item = curr_item;
1645 curr_item = __btrfs_next_delayed_item(prev_item);
1646 btrfs_release_delayed_item(prev_item);
1647 }
1648
1649 if (delayed_node->inode_dirty) {
1650 btrfs_delayed_inode_release_metadata(root, delayed_node);
1651 btrfs_release_delayed_inode(delayed_node);
1652 }
1653 mutex_unlock(&delayed_node->mutex);
1654}
1655
1656void btrfs_kill_delayed_inode_items(struct inode *inode)
1657{
1658 struct btrfs_delayed_node *delayed_node;
1659
1660 delayed_node = btrfs_get_delayed_node(inode);
1661 if (!delayed_node)
1662 return;
1663
1664 __btrfs_kill_delayed_node(delayed_node);
1665 btrfs_release_delayed_node(delayed_node);
1666}
1667
1668void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1669{
1670 u64 inode_id = 0;
1671 struct btrfs_delayed_node *delayed_nodes[8];
1672 int i, n;
1673
1674 while (1) {
1675 spin_lock(&root->inode_lock);
1676 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1677 (void **)delayed_nodes, inode_id,
1678 ARRAY_SIZE(delayed_nodes));
1679 if (!n) {
1680 spin_unlock(&root->inode_lock);
1681 break;
1682 }
1683
1684 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1685
1686 for (i = 0; i < n; i++)
1687 atomic_inc(&delayed_nodes[i]->refs);
1688 spin_unlock(&root->inode_lock);
1689
1690 for (i = 0; i < n; i++) {
1691 __btrfs_kill_delayed_node(delayed_nodes[i]);
1692 btrfs_release_delayed_node(delayed_nodes[i]);
1693 }
1694 }
1695}
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
new file mode 100644
index 000000000000..eb7d240aa648
--- /dev/null
+++ b/fs/btrfs/delayed-inode.h
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2011 Fujitsu. All rights reserved.
3 * Written by Miao Xie <miaox@cn.fujitsu.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public
7 * License v2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 021110-1307, USA.
18 */
19
20#ifndef __DELAYED_TREE_OPERATION_H
21#define __DELAYED_TREE_OPERATION_H
22
23#include <linux/rbtree.h>
24#include <linux/spinlock.h>
25#include <linux/mutex.h>
26#include <linux/list.h>
27#include <linux/wait.h>
28#include <asm/atomic.h>
29
30#include "ctree.h"
31
32/* types of the delayed item */
33#define BTRFS_DELAYED_INSERTION_ITEM 1
34#define BTRFS_DELAYED_DELETION_ITEM 2
35
36struct btrfs_delayed_root {
37 spinlock_t lock;
38 struct list_head node_list;
39 /*
40 * Used for delayed nodes which are waiting to be dealt with by the
41 * worker. If the delayed node is inserted into the work queue, we
42 * drop it from this list.
43 */
44 struct list_head prepare_list;
45 atomic_t items; /* for delayed items */
46 int nodes; /* for delayed nodes */
47 wait_queue_head_t wait;
48};
49
50struct btrfs_delayed_node {
51 u64 inode_id;
52 u64 bytes_reserved;
53 struct btrfs_root *root;
54 /* Used to add the node into the delayed root's node list. */
55 struct list_head n_list;
56 /*
57 * Used to add the node into the prepare list; the nodes in this list
58 * are waiting to be dealt with by the async worker.
59 */
60 struct list_head p_list;
61 struct rb_root ins_root;
62 struct rb_root del_root;
63 struct mutex mutex;
64 struct btrfs_inode_item inode_item;
65 atomic_t refs;
66 u64 index_cnt;
67 bool in_list;
68 bool inode_dirty;
69 int count;
70};
71
72struct btrfs_delayed_item {
73 struct rb_node rb_node;
74 struct btrfs_key key;
75 struct list_head tree_list; /* used for batch insert/delete items */
76 struct list_head readdir_list; /* used for readdir items */
77 u64 bytes_reserved;
78 struct btrfs_block_rsv *block_rsv;
79 struct btrfs_delayed_node *delayed_node;
80 atomic_t refs;
81 int ins_or_del;
82 u32 data_len;
83 char data[0];
84};
85
86static inline void btrfs_init_delayed_root(
87 struct btrfs_delayed_root *delayed_root)
88{
89 atomic_set(&delayed_root->items, 0);
90 delayed_root->nodes = 0;
91 spin_lock_init(&delayed_root->lock);
92 init_waitqueue_head(&delayed_root->wait);
93 INIT_LIST_HEAD(&delayed_root->node_list);
94 INIT_LIST_HEAD(&delayed_root->prepare_list);
95}
96
97int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
98 struct btrfs_root *root, const char *name,
99 int name_len, struct inode *dir,
100 struct btrfs_disk_key *disk_key, u8 type,
101 u64 index);
102
103int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
104 struct btrfs_root *root, struct inode *dir,
105 u64 index);
106
107int btrfs_inode_delayed_dir_index_count(struct inode *inode);
108
109int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
110 struct btrfs_root *root);
111
112void btrfs_balance_delayed_items(struct btrfs_root *root);
113
114int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
115 struct inode *inode);
116/* Used for evicting the inode. */
117void btrfs_remove_delayed_node(struct inode *inode);
118void btrfs_kill_delayed_inode_items(struct inode *inode);
119
120
121int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
122 struct btrfs_root *root, struct inode *inode);
123
124/* Used for dropping dead roots */
125void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
126
127/* Used for readdir() */
128void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
129 struct list_head *del_list);
130void btrfs_put_delayed_items(struct list_head *ins_list,
131 struct list_head *del_list);
132int btrfs_should_delete_dir_index(struct list_head *del_list,
133 u64 index);
134int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
135 filldir_t filldir,
136 struct list_head *ins_list);
137
138/* for init */
139int __init btrfs_delayed_inode_init(void);
140void btrfs_delayed_inode_exit(void);
141#endif
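
An editorial sketch (not part of the patch) of how a caller is expected to set up the delayed root declared in this header; the open_ctree() hunk in disk-io.c further down does essentially this. The wrapper name is hypothetical and error handling beyond the allocation check is elided.

#include <linux/slab.h>
#include "delayed-inode.h"

static struct btrfs_delayed_root *demo_alloc_delayed_root(void)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = kmalloc(sizeof(*delayed_root), GFP_NOFS);
	if (!delayed_root)
		return NULL;

	/* zero the counters and init the lock, waitqueue and both lists */
	btrfs_init_delayed_root(delayed_root);
	return delayed_root;
}
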
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index bce28f653899..125cf76fcd08 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -281,44 +281,6 @@ again:
281} 281}
282 282
283/* 283/*
284 * This checks to see if there are any delayed refs in the
285 * btree for a given bytenr. It returns one if it finds any
286 * and zero otherwise.
287 *
288 * If it only finds a head node, it returns 0.
289 *
290 * The idea is to use this when deciding if you can safely delete an
291 * extent from the extent allocation tree. There may be a pending
292 * ref in the rbtree that adds or removes references, so as long as this
293 * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
294 * allocation tree.
295 */
296int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
297{
298 struct btrfs_delayed_ref_node *ref;
299 struct btrfs_delayed_ref_root *delayed_refs;
300 struct rb_node *prev_node;
301 int ret = 0;
302
303 delayed_refs = &trans->transaction->delayed_refs;
304 spin_lock(&delayed_refs->lock);
305
306 ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
307 if (ref) {
308 prev_node = rb_prev(&ref->rb_node);
309 if (!prev_node)
310 goto out;
311 ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
312 rb_node);
313 if (ref->bytenr == bytenr)
314 ret = 1;
315 }
316out:
317 spin_unlock(&delayed_refs->lock);
318 return ret;
319}
320
321/*
322 * helper function to update an extent delayed ref in the 284 * helper function to update an extent delayed ref in the
323 * rbtree. existing and update must both have the same 285 * rbtree. existing and update must both have the same
324 * bytenr and parent 286 * bytenr and parent
@@ -747,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
747 return btrfs_delayed_node_to_head(ref); 709 return btrfs_delayed_node_to_head(ref);
748 return NULL; 710 return NULL;
749} 711}
750
751/*
752 * add a delayed ref to the tree. This does all of the accounting required
753 * to make sure the delayed ref is eventually processed before this
754 * transaction commits.
755 *
756 * The main point of this call is to add and remove a backreference in a single
757 * shot, taking the lock only once, and only searching for the head node once.
758 *
759 * It is the same as doing a ref add and delete in two separate calls.
760 */
761#if 0
762int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
763 u64 bytenr, u64 num_bytes, u64 orig_parent,
764 u64 parent, u64 orig_ref_root, u64 ref_root,
765 u64 orig_ref_generation, u64 ref_generation,
766 u64 owner_objectid, int pin)
767{
768 struct btrfs_delayed_ref *ref;
769 struct btrfs_delayed_ref *old_ref;
770 struct btrfs_delayed_ref_head *head_ref;
771 struct btrfs_delayed_ref_root *delayed_refs;
772 int ret;
773
774 ref = kmalloc(sizeof(*ref), GFP_NOFS);
775 if (!ref)
776 return -ENOMEM;
777
778 old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
779 if (!old_ref) {
780 kfree(ref);
781 return -ENOMEM;
782 }
783
784 /*
785 * the parent = 0 case comes from cases where we don't actually
786 * know the parent yet. It will get updated later via an add/drop
787 * pair.
788 */
789 if (parent == 0)
790 parent = bytenr;
791 if (orig_parent == 0)
792 orig_parent = bytenr;
793
794 head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
795 if (!head_ref) {
796 kfree(ref);
797 kfree(old_ref);
798 return -ENOMEM;
799 }
800 delayed_refs = &trans->transaction->delayed_refs;
801 spin_lock(&delayed_refs->lock);
802
803 /*
804 * insert both the head node and the new ref without dropping
805 * the spin lock
806 */
807 ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
808 (u64)-1, 0, 0, 0,
809 BTRFS_UPDATE_DELAYED_HEAD, 0);
810 BUG_ON(ret);
811
812 ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
813 parent, ref_root, ref_generation,
814 owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
815 BUG_ON(ret);
816
817 ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
818 orig_parent, orig_ref_root,
819 orig_ref_generation, owner_objectid,
820 BTRFS_DROP_DELAYED_REF, pin);
821 BUG_ON(ret);
822 spin_unlock(&delayed_refs->lock);
823 return 0;
824}
825#endif
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 50e3cf92fbda..e287e3b0eab0 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -166,12 +166,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
166 166
167struct btrfs_delayed_ref_head * 167struct btrfs_delayed_ref_head *
168btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); 168btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
169int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
170int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
171 u64 bytenr, u64 num_bytes, u64 orig_parent,
172 u64 parent, u64 orig_ref_root, u64 ref_root,
173 u64 orig_ref_generation, u64 ref_generation,
174 u64 owner_objectid, int pin);
175int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, 169int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
176 struct btrfs_delayed_ref_head *head); 170 struct btrfs_delayed_ref_head *head);
177int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, 171int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index c62f02f6ae69..685f2593c4f0 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -50,7 +50,6 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
50 if (di) 50 if (di)
51 return ERR_PTR(-EEXIST); 51 return ERR_PTR(-EEXIST);
52 ret = btrfs_extend_item(trans, root, path, data_size); 52 ret = btrfs_extend_item(trans, root, path, data_size);
53 WARN_ON(ret > 0);
54 } 53 }
55 if (ret < 0) 54 if (ret < 0)
56 return ERR_PTR(ret); 55 return ERR_PTR(ret);
@@ -124,8 +123,9 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
124 * to use for the second index (if one is created). 123 * to use for the second index (if one is created).
125 */ 124 */
126int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root 125int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
127 *root, const char *name, int name_len, u64 dir, 126 *root, const char *name, int name_len,
128 struct btrfs_key *location, u8 type, u64 index) 127 struct inode *dir, struct btrfs_key *location,
128 u8 type, u64 index)
129{ 129{
130 int ret = 0; 130 int ret = 0;
131 int ret2 = 0; 131 int ret2 = 0;
@@ -137,13 +137,17 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
137 struct btrfs_disk_key disk_key; 137 struct btrfs_disk_key disk_key;
138 u32 data_size; 138 u32 data_size;
139 139
140 key.objectid = dir; 140 key.objectid = btrfs_ino(dir);
141 btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); 141 btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
142 key.offset = btrfs_name_hash(name, name_len); 142 key.offset = btrfs_name_hash(name, name_len);
143 143
144 path = btrfs_alloc_path(); 144 path = btrfs_alloc_path();
145 if (!path)
146 return -ENOMEM;
145 path->leave_spinning = 1; 147 path->leave_spinning = 1;
146 148
149 btrfs_cpu_key_to_disk(&disk_key, location);
150
147 data_size = sizeof(*dir_item) + name_len; 151 data_size = sizeof(*dir_item) + name_len;
148 dir_item = insert_with_overflow(trans, root, path, &key, data_size, 152 dir_item = insert_with_overflow(trans, root, path, &key, data_size,
149 name, name_len); 153 name, name_len);
@@ -155,7 +159,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
155 } 159 }
156 160
157 leaf = path->nodes[0]; 161 leaf = path->nodes[0];
158 btrfs_cpu_key_to_disk(&disk_key, location);
159 btrfs_set_dir_item_key(leaf, dir_item, &disk_key); 162 btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
160 btrfs_set_dir_type(leaf, dir_item, type); 163 btrfs_set_dir_type(leaf, dir_item, type);
161 btrfs_set_dir_data_len(leaf, dir_item, 0); 164 btrfs_set_dir_data_len(leaf, dir_item, 0);
@@ -172,29 +175,11 @@ second_insert:
172 ret = 0; 175 ret = 0;
173 goto out_free; 176 goto out_free;
174 } 177 }
175 btrfs_release_path(root, path); 178 btrfs_release_path(path);
176
177 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
178 key.offset = index;
179 dir_item = insert_with_overflow(trans, root, path, &key, data_size,
180 name, name_len);
181 if (IS_ERR(dir_item)) {
182 ret2 = PTR_ERR(dir_item);
183 goto out_free;
184 }
185 leaf = path->nodes[0];
186 btrfs_cpu_key_to_disk(&disk_key, location);
187 btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
188 btrfs_set_dir_type(leaf, dir_item, type);
189 btrfs_set_dir_data_len(leaf, dir_item, 0);
190 btrfs_set_dir_name_len(leaf, dir_item, name_len);
191 btrfs_set_dir_transid(leaf, dir_item, trans->transid);
192 name_ptr = (unsigned long)(dir_item + 1);
193 write_extent_buffer(leaf, name, name_ptr, name_len);
194 btrfs_mark_buffer_dirty(leaf);
195 179
180 ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
181 &disk_key, type, index);
196out_free: 182out_free:
197
198 btrfs_free_path(path); 183 btrfs_free_path(path);
199 if (ret) 184 if (ret)
200 return ret; 185 return ret;
@@ -452,7 +437,7 @@ int verify_dir_item(struct btrfs_root *root,
452 namelen = XATTR_NAME_MAX; 437 namelen = XATTR_NAME_MAX;
453 438
454 if (btrfs_dir_name_len(leaf, dir_item) > namelen) { 439 if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
455 printk(KERN_CRIT "btrfS: invalid dir item name len: %u\n", 440 printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n",
456 (unsigned)btrfs_dir_data_len(leaf, dir_item)); 441 (unsigned)btrfs_dir_data_len(leaf, dir_item));
457 return 1; 442 return 1;
458 } 443 }
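
An editorial sketch (not part of the patch) of what a caller of the reworked btrfs_insert_dir_item() looks like after this hunk: the directory inode is now passed instead of its objectid, the DIR_ITEM still goes straight into the b-tree, and the matching DIR_INDEX item is queued through btrfs_insert_delayed_dir_index() rather than a second immediate tree insert. The function name and its arguments below are hypothetical; the btrfs helpers are the ones visible in this series.

#include "ctree.h"
#include "btrfs_inode.h"
#include "transaction.h"

static int demo_add_dir_entry(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, struct inode *dir,
			      struct inode *inode, const char *name,
			      int name_len, u64 index)
{
	struct btrfs_key location;

	/* key of the inode this new directory entry points at */
	location.objectid = btrfs_ino(inode);
	btrfs_set_key_type(&location, BTRFS_INODE_ITEM_KEY);
	location.offset = 0;

	/*
	 * Inserts the DIR_ITEM immediately; the DIR_INDEX half is handed to
	 * the delayed-item machinery and reaches the tree later in a batch.
	 */
	return btrfs_insert_dir_item(trans, root, name, name_len, dir,
				     &location, BTRFS_FT_REG_FILE, index);
}
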
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 228cf36ece83..98b6a71decba 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -29,6 +29,7 @@
29#include <linux/crc32c.h> 29#include <linux/crc32c.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/migrate.h> 31#include <linux/migrate.h>
32#include <linux/ratelimit.h>
32#include <asm/unaligned.h> 33#include <asm/unaligned.h>
33#include "compat.h" 34#include "compat.h"
34#include "ctree.h" 35#include "ctree.h"
@@ -41,6 +42,7 @@
41#include "locking.h" 42#include "locking.h"
42#include "tree-log.h" 43#include "tree-log.h"
43#include "free-space-cache.h" 44#include "free-space-cache.h"
45#include "inode-map.h"
44 46
45static struct extent_io_ops btree_extent_io_ops; 47static struct extent_io_ops btree_extent_io_ops;
46static void end_workqueue_fn(struct btrfs_work *work); 48static void end_workqueue_fn(struct btrfs_work *work);
@@ -137,7 +139,7 @@ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
137 * that covers the entire device 139 * that covers the entire device
138 */ 140 */
139static struct extent_map *btree_get_extent(struct inode *inode, 141static struct extent_map *btree_get_extent(struct inode *inode,
140 struct page *page, size_t page_offset, u64 start, u64 len, 142 struct page *page, size_t pg_offset, u64 start, u64 len,
141 int create) 143 int create)
142{ 144{
143 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 145 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -154,7 +156,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
154 } 156 }
155 read_unlock(&em_tree->lock); 157 read_unlock(&em_tree->lock);
156 158
157 em = alloc_extent_map(GFP_NOFS); 159 em = alloc_extent_map();
158 if (!em) { 160 if (!em) {
159 em = ERR_PTR(-ENOMEM); 161 em = ERR_PTR(-ENOMEM);
160 goto out; 162 goto out;
@@ -254,14 +256,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
254 memcpy(&found, result, csum_size); 256 memcpy(&found, result, csum_size);
255 257
256 read_extent_buffer(buf, &val, 0, csum_size); 258 read_extent_buffer(buf, &val, 0, csum_size);
257 if (printk_ratelimit()) { 259 printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
258 printk(KERN_INFO "btrfs: %s checksum verify "
259 "failed on %llu wanted %X found %X " 260 "failed on %llu wanted %X found %X "
260 "level %d\n", 261 "level %d\n",
261 root->fs_info->sb->s_id, 262 root->fs_info->sb->s_id,
262 (unsigned long long)buf->start, val, found, 263 (unsigned long long)buf->start, val, found,
263 btrfs_header_level(buf)); 264 btrfs_header_level(buf));
264 }
265 if (result != (char *)&inline_result) 265 if (result != (char *)&inline_result)
266 kfree(result); 266 kfree(result);
267 return 1; 267 return 1;
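
An editorial sketch (not part of the patch) of the logging conversion in this and the following hunks: the open-coded printk_ratelimit() check is collapsed into printk_ratelimited(), which keeps per-call-site rate-limit state. The message and function below are made up for illustration only.

#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/types.h>

static void demo_report_bad_block(u64 bytenr)
{
	/* old style: explicit check wrapped around the printk */
	if (printk_ratelimit())
		printk(KERN_INFO "demo: bad block %llu\n",
		       (unsigned long long)bytenr);

	/* new style: one statement, rate limiting handled internally */
	printk_ratelimited(KERN_INFO "demo: bad block %llu\n",
			   (unsigned long long)bytenr);
}
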
@@ -296,13 +296,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
296 ret = 0; 296 ret = 0;
297 goto out; 297 goto out;
298 } 298 }
299 if (printk_ratelimit()) { 299 printk_ratelimited("parent transid verify failed on %llu wanted %llu "
300 printk("parent transid verify failed on %llu wanted %llu "
301 "found %llu\n", 300 "found %llu\n",
302 (unsigned long long)eb->start, 301 (unsigned long long)eb->start,
303 (unsigned long long)parent_transid, 302 (unsigned long long)parent_transid,
304 (unsigned long long)btrfs_header_generation(eb)); 303 (unsigned long long)btrfs_header_generation(eb));
305 }
306 ret = 1; 304 ret = 1;
307 clear_extent_buffer_uptodate(io_tree, eb, &cached_state); 305 clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
308out: 306out:
@@ -380,7 +378,7 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
380 len = page->private >> 2; 378 len = page->private >> 2;
381 WARN_ON(len == 0); 379 WARN_ON(len == 0);
382 380
383 eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); 381 eb = alloc_extent_buffer(tree, start, len, page);
384 if (eb == NULL) { 382 if (eb == NULL) {
385 WARN_ON(1); 383 WARN_ON(1);
386 goto out; 384 goto out;
@@ -525,7 +523,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
525 len = page->private >> 2; 523 len = page->private >> 2;
526 WARN_ON(len == 0); 524 WARN_ON(len == 0);
527 525
528 eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); 526 eb = alloc_extent_buffer(tree, start, len, page);
529 if (eb == NULL) { 527 if (eb == NULL) {
530 ret = -EIO; 528 ret = -EIO;
531 goto out; 529 goto out;
@@ -533,12 +531,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
533 531
534 found_start = btrfs_header_bytenr(eb); 532 found_start = btrfs_header_bytenr(eb);
535 if (found_start != start) { 533 if (found_start != start) {
536 if (printk_ratelimit()) { 534 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
537 printk(KERN_INFO "btrfs bad tree block start "
538 "%llu %llu\n", 535 "%llu %llu\n",
539 (unsigned long long)found_start, 536 (unsigned long long)found_start,
540 (unsigned long long)eb->start); 537 (unsigned long long)eb->start);
541 }
542 ret = -EIO; 538 ret = -EIO;
543 goto err; 539 goto err;
544 } 540 }
@@ -550,10 +546,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
550 goto err; 546 goto err;
551 } 547 }
552 if (check_tree_block_fsid(root, eb)) { 548 if (check_tree_block_fsid(root, eb)) {
553 if (printk_ratelimit()) { 549 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
554 printk(KERN_INFO "btrfs bad fsid on block %llu\n",
555 (unsigned long long)eb->start); 550 (unsigned long long)eb->start);
556 }
557 ret = -EIO; 551 ret = -EIO;
558 goto err; 552 goto err;
559 } 553 }
@@ -650,12 +644,6 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
650 return 256 * limit; 644 return 256 * limit;
651} 645}
652 646
653int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
654{
655 return atomic_read(&info->nr_async_bios) >
656 btrfs_async_submit_limit(info);
657}
658
659static void run_one_async_start(struct btrfs_work *work) 647static void run_one_async_start(struct btrfs_work *work)
660{ 648{
661 struct async_submit_bio *async; 649 struct async_submit_bio *async;
@@ -963,7 +951,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
963 struct inode *btree_inode = root->fs_info->btree_inode; 951 struct inode *btree_inode = root->fs_info->btree_inode;
964 struct extent_buffer *eb; 952 struct extent_buffer *eb;
965 eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, 953 eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
966 bytenr, blocksize, GFP_NOFS); 954 bytenr, blocksize);
967 return eb; 955 return eb;
968} 956}
969 957
@@ -974,7 +962,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
974 struct extent_buffer *eb; 962 struct extent_buffer *eb;
975 963
976 eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree, 964 eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
977 bytenr, blocksize, NULL, GFP_NOFS); 965 bytenr, blocksize, NULL);
978 return eb; 966 return eb;
979} 967}
980 968
@@ -1058,13 +1046,13 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1058 root->name = NULL; 1046 root->name = NULL;
1059 root->in_sysfs = 0; 1047 root->in_sysfs = 0;
1060 root->inode_tree = RB_ROOT; 1048 root->inode_tree = RB_ROOT;
1049 INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1061 root->block_rsv = NULL; 1050 root->block_rsv = NULL;
1062 root->orphan_block_rsv = NULL; 1051 root->orphan_block_rsv = NULL;
1063 1052
1064 INIT_LIST_HEAD(&root->dirty_list); 1053 INIT_LIST_HEAD(&root->dirty_list);
1065 INIT_LIST_HEAD(&root->orphan_list); 1054 INIT_LIST_HEAD(&root->orphan_list);
1066 INIT_LIST_HEAD(&root->root_list); 1055 INIT_LIST_HEAD(&root->root_list);
1067 spin_lock_init(&root->node_lock);
1068 spin_lock_init(&root->orphan_lock); 1056 spin_lock_init(&root->orphan_lock);
1069 spin_lock_init(&root->inode_lock); 1057 spin_lock_init(&root->inode_lock);
1070 spin_lock_init(&root->accounting_lock); 1058 spin_lock_init(&root->accounting_lock);
@@ -1080,7 +1068,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1080 root->log_transid = 0; 1068 root->log_transid = 0;
1081 root->last_log_commit = 0; 1069 root->last_log_commit = 0;
1082 extent_io_tree_init(&root->dirty_log_pages, 1070 extent_io_tree_init(&root->dirty_log_pages,
1083 fs_info->btree_inode->i_mapping, GFP_NOFS); 1071 fs_info->btree_inode->i_mapping);
1084 1072
1085 memset(&root->root_key, 0, sizeof(root->root_key)); 1073 memset(&root->root_key, 0, sizeof(root->root_key));
1086 memset(&root->root_item, 0, sizeof(root->root_item)); 1074 memset(&root->root_item, 0, sizeof(root->root_item));
@@ -1283,21 +1271,6 @@ out:
1283 return root; 1271 return root;
1284} 1272}
1285 1273
1286struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1287 u64 root_objectid)
1288{
1289 struct btrfs_root *root;
1290
1291 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
1292 return fs_info->tree_root;
1293 if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
1294 return fs_info->extent_root;
1295
1296 root = radix_tree_lookup(&fs_info->fs_roots_radix,
1297 (unsigned long)root_objectid);
1298 return root;
1299}
1300
1301struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, 1274struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1302 struct btrfs_key *location) 1275 struct btrfs_key *location)
1303{ 1276{
@@ -1326,6 +1299,19 @@ again:
1326 if (IS_ERR(root)) 1299 if (IS_ERR(root))
1327 return root; 1300 return root;
1328 1301
1302 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1303 if (!root->free_ino_ctl)
1304 goto fail;
1305 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1306 GFP_NOFS);
1307 if (!root->free_ino_pinned)
1308 goto fail;
1309
1310 btrfs_init_free_ino_ctl(root);
1311 mutex_init(&root->fs_commit_mutex);
1312 spin_lock_init(&root->cache_lock);
1313 init_waitqueue_head(&root->cache_wait);
1314
1329 set_anon_super(&root->anon_super, NULL); 1315 set_anon_super(&root->anon_super, NULL);
1330 1316
1331 if (btrfs_root_refs(&root->root_item) == 0) { 1317 if (btrfs_root_refs(&root->root_item) == 0) {
@@ -1369,41 +1355,6 @@ fail:
1369 return ERR_PTR(ret); 1355 return ERR_PTR(ret);
1370} 1356}
1371 1357
1372struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
1373 struct btrfs_key *location,
1374 const char *name, int namelen)
1375{
1376 return btrfs_read_fs_root_no_name(fs_info, location);
1377#if 0
1378 struct btrfs_root *root;
1379 int ret;
1380
1381 root = btrfs_read_fs_root_no_name(fs_info, location);
1382 if (!root)
1383 return NULL;
1384
1385 if (root->in_sysfs)
1386 return root;
1387
1388 ret = btrfs_set_root_name(root, name, namelen);
1389 if (ret) {
1390 free_extent_buffer(root->node);
1391 kfree(root);
1392 return ERR_PTR(ret);
1393 }
1394
1395 ret = btrfs_sysfs_add_root(root);
1396 if (ret) {
1397 free_extent_buffer(root->node);
1398 kfree(root->name);
1399 kfree(root);
1400 return ERR_PTR(ret);
1401 }
1402 root->in_sysfs = 1;
1403 return root;
1404#endif
1405}
1406
1407static int btrfs_congested_fn(void *congested_data, int bdi_bits) 1358static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1408{ 1359{
1409 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; 1360 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
@@ -1411,7 +1362,8 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1411 struct btrfs_device *device; 1362 struct btrfs_device *device;
1412 struct backing_dev_info *bdi; 1363 struct backing_dev_info *bdi;
1413 1364
1414 list_for_each_entry(device, &info->fs_devices->devices, dev_list) { 1365 rcu_read_lock();
1366 list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1415 if (!device->bdev) 1367 if (!device->bdev)
1416 continue; 1368 continue;
1417 bdi = blk_get_backing_dev_info(device->bdev); 1369 bdi = blk_get_backing_dev_info(device->bdev);
@@ -1420,6 +1372,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1420 break; 1372 break;
1421 } 1373 }
1422 } 1374 }
1375 rcu_read_unlock();
1423 return ret; 1376 return ret;
1424} 1377}
1425 1378
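
An editorial sketch (not part of the patch) of the RCU reader pattern this hunk switches btrfs_congested_fn() to: the device list is traversed under rcu_read_lock() with list_for_each_entry_rcu(), so readers no longer need the writers' lock. The struct and function are placeholders; the RCU and list primitives are the real kernel APIs.

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct demo_device {
	struct list_head dev_list;
	int congested;
};

static int demo_any_congested(struct list_head *devices)
{
	struct demo_device *dev;
	int ret = 0;

	rcu_read_lock();		/* readers never block list updaters */
	list_for_each_entry_rcu(dev, devices, dev_list) {
		if (dev->congested) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
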
@@ -1522,6 +1475,7 @@ static int cleaner_kthread(void *arg)
1522 btrfs_run_delayed_iputs(root); 1475 btrfs_run_delayed_iputs(root);
1523 btrfs_clean_old_snapshots(root); 1476 btrfs_clean_old_snapshots(root);
1524 mutex_unlock(&root->fs_info->cleaner_mutex); 1477 mutex_unlock(&root->fs_info->cleaner_mutex);
1478 btrfs_run_defrag_inodes(root->fs_info);
1525 } 1479 }
1526 1480
1527 if (freezing(current)) { 1481 if (freezing(current)) {
@@ -1611,7 +1565,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1611 struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), 1565 struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1612 GFP_NOFS); 1566 GFP_NOFS);
1613 struct btrfs_root *tree_root = btrfs_sb(sb); 1567 struct btrfs_root *tree_root = btrfs_sb(sb);
1614 struct btrfs_fs_info *fs_info = tree_root->fs_info; 1568 struct btrfs_fs_info *fs_info = NULL;
1615 struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), 1569 struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1616 GFP_NOFS); 1570 GFP_NOFS);
1617 struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), 1571 struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
@@ -1623,11 +1577,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1623 1577
1624 struct btrfs_super_block *disk_super; 1578 struct btrfs_super_block *disk_super;
1625 1579
1626 if (!extent_root || !tree_root || !fs_info || 1580 if (!extent_root || !tree_root || !tree_root->fs_info ||
1627 !chunk_root || !dev_root || !csum_root) { 1581 !chunk_root || !dev_root || !csum_root) {
1628 err = -ENOMEM; 1582 err = -ENOMEM;
1629 goto fail; 1583 goto fail;
1630 } 1584 }
1585 fs_info = tree_root->fs_info;
1631 1586
1632 ret = init_srcu_struct(&fs_info->subvol_srcu); 1587 ret = init_srcu_struct(&fs_info->subvol_srcu);
1633 if (ret) { 1588 if (ret) {
@@ -1662,6 +1617,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1662 spin_lock_init(&fs_info->ref_cache_lock); 1617 spin_lock_init(&fs_info->ref_cache_lock);
1663 spin_lock_init(&fs_info->fs_roots_radix_lock); 1618 spin_lock_init(&fs_info->fs_roots_radix_lock);
1664 spin_lock_init(&fs_info->delayed_iput_lock); 1619 spin_lock_init(&fs_info->delayed_iput_lock);
1620 spin_lock_init(&fs_info->defrag_inodes_lock);
1665 1621
1666 init_completion(&fs_info->kobj_unregister); 1622 init_completion(&fs_info->kobj_unregister);
1667 fs_info->tree_root = tree_root; 1623 fs_info->tree_root = tree_root;
@@ -1684,15 +1640,35 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1684 atomic_set(&fs_info->async_delalloc_pages, 0); 1640 atomic_set(&fs_info->async_delalloc_pages, 0);
1685 atomic_set(&fs_info->async_submit_draining, 0); 1641 atomic_set(&fs_info->async_submit_draining, 0);
1686 atomic_set(&fs_info->nr_async_bios, 0); 1642 atomic_set(&fs_info->nr_async_bios, 0);
1643 atomic_set(&fs_info->defrag_running, 0);
1687 fs_info->sb = sb; 1644 fs_info->sb = sb;
1688 fs_info->max_inline = 8192 * 1024; 1645 fs_info->max_inline = 8192 * 1024;
1689 fs_info->metadata_ratio = 0; 1646 fs_info->metadata_ratio = 0;
1647 fs_info->defrag_inodes = RB_ROOT;
1690 1648
1691 fs_info->thread_pool_size = min_t(unsigned long, 1649 fs_info->thread_pool_size = min_t(unsigned long,
1692 num_online_cpus() + 2, 8); 1650 num_online_cpus() + 2, 8);
1693 1651
1694 INIT_LIST_HEAD(&fs_info->ordered_extents); 1652 INIT_LIST_HEAD(&fs_info->ordered_extents);
1695 spin_lock_init(&fs_info->ordered_extent_lock); 1653 spin_lock_init(&fs_info->ordered_extent_lock);
1654 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
1655 GFP_NOFS);
1656 if (!fs_info->delayed_root) {
1657 err = -ENOMEM;
1658 goto fail_iput;
1659 }
1660 btrfs_init_delayed_root(fs_info->delayed_root);
1661
1662 mutex_init(&fs_info->scrub_lock);
1663 atomic_set(&fs_info->scrubs_running, 0);
1664 atomic_set(&fs_info->scrub_pause_req, 0);
1665 atomic_set(&fs_info->scrubs_paused, 0);
1666 atomic_set(&fs_info->scrub_cancel_req, 0);
1667 init_waitqueue_head(&fs_info->scrub_pause_wait);
1668 init_rwsem(&fs_info->scrub_super_lock);
1669 fs_info->scrub_workers_refcnt = 0;
1670 btrfs_init_workers(&fs_info->scrub_workers, "scrub",
1671 fs_info->thread_pool_size, &fs_info->generic_worker);
1696 1672
1697 sb->s_blocksize = 4096; 1673 sb->s_blocksize = 4096;
1698 sb->s_blocksize_bits = blksize_bits(4096); 1674 sb->s_blocksize_bits = blksize_bits(4096);
@@ -1711,10 +1687,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1711 1687
1712 RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); 1688 RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
1713 extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, 1689 extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1714 fs_info->btree_inode->i_mapping, 1690 fs_info->btree_inode->i_mapping);
1715 GFP_NOFS); 1691 extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
1716 extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
1717 GFP_NOFS);
1718 1692
1719 BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; 1693 BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1720 1694
@@ -1728,9 +1702,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1728 fs_info->block_group_cache_tree = RB_ROOT; 1702 fs_info->block_group_cache_tree = RB_ROOT;
1729 1703
1730 extent_io_tree_init(&fs_info->freed_extents[0], 1704 extent_io_tree_init(&fs_info->freed_extents[0],
1731 fs_info->btree_inode->i_mapping, GFP_NOFS); 1705 fs_info->btree_inode->i_mapping);
1732 extent_io_tree_init(&fs_info->freed_extents[1], 1706 extent_io_tree_init(&fs_info->freed_extents[1],
1733 fs_info->btree_inode->i_mapping, GFP_NOFS); 1707 fs_info->btree_inode->i_mapping);
1734 fs_info->pinned_extents = &fs_info->freed_extents[0]; 1708 fs_info->pinned_extents = &fs_info->freed_extents[0];
1735 fs_info->do_barriers = 1; 1709 fs_info->do_barriers = 1;
1736 1710
@@ -1760,7 +1734,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1760 bh = btrfs_read_dev_super(fs_devices->latest_bdev); 1734 bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1761 if (!bh) { 1735 if (!bh) {
1762 err = -EINVAL; 1736 err = -EINVAL;
1763 goto fail_iput; 1737 goto fail_alloc;
1764 } 1738 }
1765 1739
1766 memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); 1740 memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
@@ -1772,7 +1746,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1772 1746
1773 disk_super = &fs_info->super_copy; 1747 disk_super = &fs_info->super_copy;
1774 if (!btrfs_super_root(disk_super)) 1748 if (!btrfs_super_root(disk_super))
1775 goto fail_iput; 1749 goto fail_alloc;
1776 1750
1777 /* check FS state, whether FS is broken. */ 1751 /* check FS state, whether FS is broken. */
1778 fs_info->fs_state |= btrfs_super_flags(disk_super); 1752 fs_info->fs_state |= btrfs_super_flags(disk_super);
@@ -1788,7 +1762,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1788 ret = btrfs_parse_options(tree_root, options); 1762 ret = btrfs_parse_options(tree_root, options);
1789 if (ret) { 1763 if (ret) {
1790 err = ret; 1764 err = ret;
1791 goto fail_iput; 1765 goto fail_alloc;
1792 } 1766 }
1793 1767
1794 features = btrfs_super_incompat_flags(disk_super) & 1768 features = btrfs_super_incompat_flags(disk_super) &
@@ -1798,7 +1772,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1798 "unsupported optional features (%Lx).\n", 1772 "unsupported optional features (%Lx).\n",
1799 (unsigned long long)features); 1773 (unsigned long long)features);
1800 err = -EINVAL; 1774 err = -EINVAL;
1801 goto fail_iput; 1775 goto fail_alloc;
1802 } 1776 }
1803 1777
1804 features = btrfs_super_incompat_flags(disk_super); 1778 features = btrfs_super_incompat_flags(disk_super);
@@ -1814,7 +1788,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1814 "unsupported option features (%Lx).\n", 1788 "unsupported option features (%Lx).\n",
1815 (unsigned long long)features); 1789 (unsigned long long)features);
1816 err = -EINVAL; 1790 err = -EINVAL;
1817 goto fail_iput; 1791 goto fail_alloc;
1818 } 1792 }
1819 1793
1820 btrfs_init_workers(&fs_info->generic_worker, 1794 btrfs_init_workers(&fs_info->generic_worker,
@@ -1861,6 +1835,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1861 &fs_info->generic_worker); 1835 &fs_info->generic_worker);
1862 btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", 1836 btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
1863 1, &fs_info->generic_worker); 1837 1, &fs_info->generic_worker);
1838 btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
1839 fs_info->thread_pool_size,
1840 &fs_info->generic_worker);
1864 1841
1865 /* 1842 /*
1866 * endios are largely parallel and should have a very 1843 * endios are largely parallel and should have a very
@@ -1882,6 +1859,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1882 btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); 1859 btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
1883 btrfs_start_workers(&fs_info->endio_write_workers, 1); 1860 btrfs_start_workers(&fs_info->endio_write_workers, 1);
1884 btrfs_start_workers(&fs_info->endio_freespace_worker, 1); 1861 btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
1862 btrfs_start_workers(&fs_info->delayed_workers, 1);
1885 1863
1886 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); 1864 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1887 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, 1865 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2138,6 +2116,9 @@ fail_sb_buffer:
2138 btrfs_stop_workers(&fs_info->endio_write_workers); 2116 btrfs_stop_workers(&fs_info->endio_write_workers);
2139 btrfs_stop_workers(&fs_info->endio_freespace_worker); 2117 btrfs_stop_workers(&fs_info->endio_freespace_worker);
2140 btrfs_stop_workers(&fs_info->submit_workers); 2118 btrfs_stop_workers(&fs_info->submit_workers);
2119 btrfs_stop_workers(&fs_info->delayed_workers);
2120fail_alloc:
2121 kfree(fs_info->delayed_root);
2141fail_iput: 2122fail_iput:
2142 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 2123 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2143 iput(fs_info->btree_inode); 2124 iput(fs_info->btree_inode);
@@ -2165,11 +2146,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2165 if (uptodate) { 2146 if (uptodate) {
2166 set_buffer_uptodate(bh); 2147 set_buffer_uptodate(bh);
2167 } else { 2148 } else {
2168 if (printk_ratelimit()) { 2149 printk_ratelimited(KERN_WARNING "lost page write due to "
2169 printk(KERN_WARNING "lost page write due to "
2170 "I/O error on %s\n", 2150 "I/O error on %s\n",
2171 bdevname(bh->b_bdev, b)); 2151 bdevname(bh->b_bdev, b));
2172 }
2173 /* note, we don't set_buffer_write_io_error because we have 2152
2174 * our own ways of dealing with the IO errors 2153 * our own ways of dealing with the IO errors
2175 */ 2154 */
@@ -2333,7 +2312,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2333 2312
2334 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 2313 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2335 head = &root->fs_info->fs_devices->devices; 2314 head = &root->fs_info->fs_devices->devices;
2336 list_for_each_entry(dev, head, dev_list) { 2315 list_for_each_entry_rcu(dev, head, dev_list) {
2337 if (!dev->bdev) { 2316 if (!dev->bdev) {
2338 total_errors++; 2317 total_errors++;
2339 continue; 2318 continue;
@@ -2366,7 +2345,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2366 } 2345 }
2367 2346
2368 total_errors = 0; 2347 total_errors = 0;
2369 list_for_each_entry(dev, head, dev_list) { 2348 list_for_each_entry_rcu(dev, head, dev_list) {
2370 if (!dev->bdev) 2349 if (!dev->bdev)
2371 continue; 2350 continue;
2372 if (!dev->in_fs_metadata || !dev->writeable) 2351 if (!dev->in_fs_metadata || !dev->writeable)
@@ -2404,12 +2383,15 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2404 if (btrfs_root_refs(&root->root_item) == 0) 2383 if (btrfs_root_refs(&root->root_item) == 0)
2405 synchronize_srcu(&fs_info->subvol_srcu); 2384 synchronize_srcu(&fs_info->subvol_srcu);
2406 2385
2386 __btrfs_remove_free_space_cache(root->free_ino_pinned);
2387 __btrfs_remove_free_space_cache(root->free_ino_ctl);
2407 free_fs_root(root); 2388 free_fs_root(root);
2408 return 0; 2389 return 0;
2409} 2390}
2410 2391
2411static void free_fs_root(struct btrfs_root *root) 2392static void free_fs_root(struct btrfs_root *root)
2412{ 2393{
2394 iput(root->cache_inode);
2413 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); 2395 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2414 if (root->anon_super.s_dev) { 2396 if (root->anon_super.s_dev) {
2415 down_write(&root->anon_super.s_umount); 2397 down_write(&root->anon_super.s_umount);
@@ -2417,6 +2399,8 @@ static void free_fs_root(struct btrfs_root *root)
2417 } 2399 }
2418 free_extent_buffer(root->node); 2400 free_extent_buffer(root->node);
2419 free_extent_buffer(root->commit_root); 2401 free_extent_buffer(root->commit_root);
2402 kfree(root->free_ino_ctl);
2403 kfree(root->free_ino_pinned);
2420 kfree(root->name); 2404 kfree(root->name);
2421 kfree(root); 2405 kfree(root);
2422} 2406}
@@ -2520,6 +2504,15 @@ int close_ctree(struct btrfs_root *root)
2520 fs_info->closing = 1; 2504 fs_info->closing = 1;
2521 smp_mb(); 2505 smp_mb();
2522 2506
2507 btrfs_scrub_cancel(root);
2508
2509 /* wait for any defraggers to finish */
2510 wait_event(fs_info->transaction_wait,
2511 (atomic_read(&fs_info->defrag_running) == 0));
2512
2513 /* clear out the rbtree of defraggable inodes */
2514 btrfs_run_defrag_inodes(root->fs_info);
2515
2523 btrfs_put_block_group_cache(fs_info); 2516 btrfs_put_block_group_cache(fs_info);
2524 2517
2525 /* 2518 /*
@@ -2578,6 +2571,7 @@ int close_ctree(struct btrfs_root *root)
2578 del_fs_roots(fs_info); 2571 del_fs_roots(fs_info);
2579 2572
2580 iput(fs_info->btree_inode); 2573 iput(fs_info->btree_inode);
2574 kfree(fs_info->delayed_root);
2581 2575
2582 btrfs_stop_workers(&fs_info->generic_worker); 2576 btrfs_stop_workers(&fs_info->generic_worker);
2583 btrfs_stop_workers(&fs_info->fixup_workers); 2577 btrfs_stop_workers(&fs_info->fixup_workers);
@@ -2589,6 +2583,7 @@ int close_ctree(struct btrfs_root *root)
2589 btrfs_stop_workers(&fs_info->endio_write_workers); 2583 btrfs_stop_workers(&fs_info->endio_write_workers);
2590 btrfs_stop_workers(&fs_info->endio_freespace_worker); 2584 btrfs_stop_workers(&fs_info->endio_freespace_worker);
2591 btrfs_stop_workers(&fs_info->submit_workers); 2585 btrfs_stop_workers(&fs_info->submit_workers);
2586 btrfs_stop_workers(&fs_info->delayed_workers);
2592 2587
2593 btrfs_close_devices(fs_info->fs_devices); 2588 btrfs_close_devices(fs_info->fs_devices);
2594 btrfs_mapping_tree_free(&fs_info->mapping_tree); 2589 btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -2665,6 +2660,29 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2665 if (current->flags & PF_MEMALLOC) 2660 if (current->flags & PF_MEMALLOC)
2666 return; 2661 return;
2667 2662
2663 btrfs_balance_delayed_items(root);
2664
2665 num_dirty = root->fs_info->dirty_metadata_bytes;
2666
2667 if (num_dirty > thresh) {
2668 balance_dirty_pages_ratelimited_nr(
2669 root->fs_info->btree_inode->i_mapping, 1);
2670 }
2671 return;
2672}
2673
2674void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2675{
2676 /*
2677 * looks as though older kernels can get into trouble with
2678 * this code; they end up stuck in balance_dirty_pages forever
2679 */
2680 u64 num_dirty;
2681 unsigned long thresh = 32 * 1024 * 1024;
2682
2683 if (current->flags & PF_MEMALLOC)
2684 return;
2685
2668 num_dirty = root->fs_info->dirty_metadata_bytes; 2686 num_dirty = root->fs_info->dirty_metadata_bytes;
2669 2687
2670 if (num_dirty > thresh) { 2688 if (num_dirty > thresh) {
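
An editorial sketch (not part of the patch) of how the two balance helpers introduced above are meant to split: btrfs_btree_balance_dirty() now also calls btrfs_balance_delayed_items(), while the double-underscore variant only throttles dirty btree pages, presumably so code running on behalf of the delayed items can throttle without re-entering the delayed-item path. Both caller functions below are hypothetical.

#include "ctree.h"
#include "disk-io.h"

static void demo_delayed_worker_path(struct btrfs_root *root, unsigned long nr)
{
	/* ... flush some delayed items into the btree here ... */

	/* throttle dirty btree pages only; do not balance delayed items */
	__btrfs_btree_balance_dirty(root, nr);
}

static void demo_regular_caller(struct btrfs_root *root, unsigned long nr)
{
	/* normal transaction paths keep using the full version */
	btrfs_btree_balance_dirty(root, nr);
}
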
@@ -2697,7 +2715,7 @@ int btree_lock_page_hook(struct page *page)
2697 goto out; 2715 goto out;
2698 2716
2699 len = page->private >> 2; 2717 len = page->private >> 2;
2700 eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS); 2718 eb = find_extent_buffer(io_tree, bytenr, len);
2701 if (!eb) 2719 if (!eb)
2702 goto out; 2720 goto out;
2703 2721
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 07b20dc2fd95..a0b610a67aae 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -55,35 +55,20 @@ int btrfs_commit_super(struct btrfs_root *root);
55int btrfs_error_commit_super(struct btrfs_root *root); 55int btrfs_error_commit_super(struct btrfs_root *root);
56struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, 56struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
57 u64 bytenr, u32 blocksize); 57 u64 bytenr, u32 blocksize);
58struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
59 u64 root_objectid);
60struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
61 struct btrfs_key *location,
62 const char *name, int namelen);
63struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, 58struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
64 struct btrfs_key *location); 59 struct btrfs_key *location);
65struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, 60struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
66 struct btrfs_key *location); 61 struct btrfs_key *location);
67int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); 62int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
68int btrfs_insert_dev_radix(struct btrfs_root *root,
69 struct block_device *bdev,
70 u64 device_id,
71 u64 block_start,
72 u64 num_blocks);
73void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); 63void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
64void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
74int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); 65int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
75void btrfs_mark_buffer_dirty(struct extent_buffer *buf); 66void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
76void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf);
77int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); 67int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
78int btrfs_set_buffer_uptodate(struct extent_buffer *buf); 68int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
79int wait_on_tree_block_writeback(struct btrfs_root *root,
80 struct extent_buffer *buf);
81int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); 69int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
82u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len); 70u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
83void btrfs_csum_final(u32 crc, char *result); 71void btrfs_csum_final(u32 crc, char *result);
84int btrfs_open_device(struct btrfs_device *dev);
85int btrfs_verify_block_csum(struct btrfs_root *root,
86 struct extent_buffer *buf);
87int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, 72int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
88 int metadata); 73 int metadata);
89int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, 74int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
@@ -91,8 +76,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
91 unsigned long bio_flags, u64 bio_offset, 76 unsigned long bio_flags, u64 bio_offset,
92 extent_submit_bio_hook_t *submit_bio_start, 77 extent_submit_bio_hook_t *submit_bio_start,
93 extent_submit_bio_hook_t *submit_bio_done); 78 extent_submit_bio_hook_t *submit_bio_done);
94
95int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
96unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); 79unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
97int btrfs_write_tree_block(struct extent_buffer *buf); 80int btrfs_write_tree_block(struct extent_buffer *buf);
98int btrfs_wait_tree_block_writeback(struct extent_buffer *buf); 81int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index b4ffad859adb..1b8dc33778f9 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -32,7 +32,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
32 len = BTRFS_FID_SIZE_NON_CONNECTABLE; 32 len = BTRFS_FID_SIZE_NON_CONNECTABLE;
33 type = FILEID_BTRFS_WITHOUT_PARENT; 33 type = FILEID_BTRFS_WITHOUT_PARENT;
34 34
35 fid->objectid = inode->i_ino; 35 fid->objectid = btrfs_ino(inode);
36 fid->root_objectid = BTRFS_I(inode)->root->objectid; 36 fid->root_objectid = BTRFS_I(inode)->root->objectid;
37 fid->gen = inode->i_generation; 37 fid->gen = inode->i_generation;
38 38
@@ -178,13 +178,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
178 if (!path) 178 if (!path)
179 return ERR_PTR(-ENOMEM); 179 return ERR_PTR(-ENOMEM);
180 180
181 if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { 181 if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) {
182 key.objectid = root->root_key.objectid; 182 key.objectid = root->root_key.objectid;
183 key.type = BTRFS_ROOT_BACKREF_KEY; 183 key.type = BTRFS_ROOT_BACKREF_KEY;
184 key.offset = (u64)-1; 184 key.offset = (u64)-1;
185 root = root->fs_info->tree_root; 185 root = root->fs_info->tree_root;
186 } else { 186 } else {
187 key.objectid = dir->i_ino; 187 key.objectid = btrfs_ino(dir);
188 key.type = BTRFS_INODE_REF_KEY; 188 key.type = BTRFS_INODE_REF_KEY;
189 key.offset = (u64)-1; 189 key.offset = (u64)-1;
190 } 190 }
@@ -244,6 +244,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
244 struct btrfs_key key; 244 struct btrfs_key key;
245 int name_len; 245 int name_len;
246 int ret; 246 int ret;
247 u64 ino;
247 248
248 if (!dir || !inode) 249 if (!dir || !inode)
249 return -EINVAL; 250 return -EINVAL;
@@ -251,19 +252,21 @@ static int btrfs_get_name(struct dentry *parent, char *name,
251 if (!S_ISDIR(dir->i_mode)) 252 if (!S_ISDIR(dir->i_mode))
252 return -EINVAL; 253 return -EINVAL;
253 254
255 ino = btrfs_ino(inode);
256
254 path = btrfs_alloc_path(); 257 path = btrfs_alloc_path();
255 if (!path) 258 if (!path)
256 return -ENOMEM; 259 return -ENOMEM;
257 path->leave_spinning = 1; 260 path->leave_spinning = 1;
258 261
259 if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { 262 if (ino == BTRFS_FIRST_FREE_OBJECTID) {
260 key.objectid = BTRFS_I(inode)->root->root_key.objectid; 263 key.objectid = BTRFS_I(inode)->root->root_key.objectid;
261 key.type = BTRFS_ROOT_BACKREF_KEY; 264 key.type = BTRFS_ROOT_BACKREF_KEY;
262 key.offset = (u64)-1; 265 key.offset = (u64)-1;
263 root = root->fs_info->tree_root; 266 root = root->fs_info->tree_root;
264 } else { 267 } else {
265 key.objectid = inode->i_ino; 268 key.objectid = ino;
266 key.offset = dir->i_ino; 269 key.offset = btrfs_ino(dir);
267 key.type = BTRFS_INODE_REF_KEY; 270 key.type = BTRFS_INODE_REF_KEY;
268 } 271 }
269 272
@@ -272,7 +275,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
272 btrfs_free_path(path); 275 btrfs_free_path(path);
273 return ret; 276 return ret;
274 } else if (ret > 0) { 277 } else if (ret > 0) {
275 if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { 278 if (ino == BTRFS_FIRST_FREE_OBJECTID) {
276 path->slots[0]--; 279 path->slots[0]--;
277 } else { 280 } else {
278 btrfs_free_path(path); 281 btrfs_free_path(path);
@@ -281,11 +284,11 @@ static int btrfs_get_name(struct dentry *parent, char *name,
281 } 284 }
282 leaf = path->nodes[0]; 285 leaf = path->nodes[0];
283 286
284 if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { 287 if (ino == BTRFS_FIRST_FREE_OBJECTID) {
285 rref = btrfs_item_ptr(leaf, path->slots[0], 288 rref = btrfs_item_ptr(leaf, path->slots[0],
286 struct btrfs_root_ref); 289 struct btrfs_root_ref);
287 name_ptr = (unsigned long)(rref + 1); 290 name_ptr = (unsigned long)(rref + 1);
288 name_len = btrfs_root_ref_name_len(leaf, rref); 291 name_len = btrfs_root_ref_name_len(leaf, rref);
289 } else { 292 } else {
290 iref = btrfs_item_ptr(leaf, path->slots[0], 293 iref = btrfs_item_ptr(leaf, path->slots[0],
291 struct btrfs_inode_ref); 294 struct btrfs_inode_ref);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9ee6bd55e16c..169bd62ce776 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -94,7 +94,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
94 return (cache->flags & bits) == bits; 94 return (cache->flags & bits) == bits;
95} 95}
96 96
97void btrfs_get_block_group(struct btrfs_block_group_cache *cache) 97static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
98{ 98{
99 atomic_inc(&cache->count); 99 atomic_inc(&cache->count);
100} 100}
@@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
105 WARN_ON(cache->pinned > 0); 105 WARN_ON(cache->pinned > 0);
106 WARN_ON(cache->reserved > 0); 106 WARN_ON(cache->reserved > 0);
107 WARN_ON(cache->reserved_pinned > 0); 107 WARN_ON(cache->reserved_pinned > 0);
108 kfree(cache->free_space_ctl);
108 kfree(cache); 109 kfree(cache);
109 } 110 }
110} 111}
@@ -379,7 +380,7 @@ again:
379 break; 380 break;
380 381
381 caching_ctl->progress = last; 382 caching_ctl->progress = last;
382 btrfs_release_path(extent_root, path); 383 btrfs_release_path(path);
383 up_read(&fs_info->extent_commit_sem); 384 up_read(&fs_info->extent_commit_sem);
384 mutex_unlock(&caching_ctl->mutex); 385 mutex_unlock(&caching_ctl->mutex);
385 if (btrfs_transaction_in_commit(fs_info)) 386 if (btrfs_transaction_in_commit(fs_info))
@@ -754,8 +755,12 @@ again:
754 atomic_inc(&head->node.refs); 755 atomic_inc(&head->node.refs);
755 spin_unlock(&delayed_refs->lock); 756 spin_unlock(&delayed_refs->lock);
756 757
757 btrfs_release_path(root->fs_info->extent_root, path); 758 btrfs_release_path(path);
758 759
760 /*
761 * Mutex was contended, block until it's released and try
762 * again
763 */
759 mutex_lock(&head->mutex); 764 mutex_lock(&head->mutex);
760 mutex_unlock(&head->mutex); 765 mutex_unlock(&head->mutex);
761 btrfs_put_delayed_ref(&head->node); 766 btrfs_put_delayed_ref(&head->node);
@@ -934,7 +939,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
934 break; 939 break;
935 } 940 }
936 } 941 }
937 btrfs_release_path(root, path); 942 btrfs_release_path(path);
938 943
939 if (owner < BTRFS_FIRST_FREE_OBJECTID) 944 if (owner < BTRFS_FIRST_FREE_OBJECTID)
940 new_size += sizeof(*bi); 945 new_size += sizeof(*bi);
@@ -947,7 +952,6 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
947 BUG_ON(ret); 952 BUG_ON(ret);
948 953
949 ret = btrfs_extend_item(trans, root, path, new_size); 954 ret = btrfs_extend_item(trans, root, path, new_size);
950 BUG_ON(ret);
951 955
952 leaf = path->nodes[0]; 956 leaf = path->nodes[0];
953 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 957 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1042,7 +1046,7 @@ again:
1042 return 0; 1046 return 0;
1043#ifdef BTRFS_COMPAT_EXTENT_TREE_V0 1047#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1044 key.type = BTRFS_EXTENT_REF_V0_KEY; 1048 key.type = BTRFS_EXTENT_REF_V0_KEY;
1045 btrfs_release_path(root, path); 1049 btrfs_release_path(path);
1046 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1050 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1047 if (ret < 0) { 1051 if (ret < 0) {
1048 err = ret; 1052 err = ret;
@@ -1080,7 +1084,7 @@ again:
1080 1084			if (match_extent_data_ref(leaf, ref, root_objectid,
1081 1085						  owner, offset)) {
1082 1086				if (recow) {
1083					btrfs_release_path(root, path);
1087					btrfs_release_path(path);
1084 1088					goto again;
1085 1089				}
1086 1090				err = 0;
@@ -1141,7 +1145,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1141 1145		if (match_extent_data_ref(leaf, ref, root_objectid,
1142 1146					  owner, offset))
1143 1147			break;
1144		btrfs_release_path(root, path);
1148		btrfs_release_path(path);
1145 1149		key.offset++;
1146 1150		ret = btrfs_insert_empty_item(trans, root, path, &key,
1147 1151					      size);
@@ -1167,7 +1171,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1167 1171	btrfs_mark_buffer_dirty(leaf);
1168 1172	ret = 0;
1169 1173 fail:
1170	btrfs_release_path(root, path);
1174	btrfs_release_path(path);
1171 1175	return ret;
1172 1176 }
1173 1177
@@ -1293,7 +1297,7 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1293 1297		ret = -ENOENT;
1294 1298 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1295 1299	if (ret == -ENOENT && parent) {
1296		btrfs_release_path(root, path);
1300		btrfs_release_path(path);
1297 1301		key.type = BTRFS_EXTENT_REF_V0_KEY;
1298 1302		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1299 1303		if (ret > 0)
@@ -1322,7 +1326,7 @@ static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1322 1326	}
1323 1327
1324 1328	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1325	btrfs_release_path(root, path);
1329	btrfs_release_path(path);
1326 1330	return ret;
1327 1331 }
1328 1332
@@ -1555,7 +1559,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1555 1559	size = btrfs_extent_inline_ref_size(type);
1556 1560
1557 1561	ret = btrfs_extend_item(trans, root, path, size);
1558	BUG_ON(ret);
1559 1562
1560 1563	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1561 1564	refs = btrfs_extent_refs(leaf, ei);
@@ -1608,7 +1611,7 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1608 1611	if (ret != -ENOENT)
1609 1612		return ret;
1610 1613
1611	btrfs_release_path(root, path);
1614	btrfs_release_path(path);
1612 1615	*ref_ret = NULL;
1613 1616
1614 1617	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
@@ -1684,7 +1687,6 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1684 1687				      end - ptr - size);
1685 1688		item_size -= size;
1686 1689		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1687		BUG_ON(ret);
1688 1690	}
1689 1691	btrfs_mark_buffer_dirty(leaf);
1690 1692	return 0;
@@ -1862,7 +1864,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1862 1864		__run_delayed_extent_op(extent_op, leaf, item);
1863 1865
1864 1866	btrfs_mark_buffer_dirty(leaf);
1865	btrfs_release_path(root->fs_info->extent_root, path);
1867	btrfs_release_path(path);
1866 1868
1867 1869	path->reada = 1;
1868 1870	path->leave_spinning = 1;
@@ -2297,6 +2299,10 @@ again:
2297 2299			atomic_inc(&ref->refs);
2298 2300
2299 2301			spin_unlock(&delayed_refs->lock);
2302			/*
2303			 * Mutex was contended, block until it's
2304			 * released and try again
2305			 */
2300 2306			mutex_lock(&head->mutex);
2301 2307			mutex_unlock(&head->mutex);
2302 2308
@@ -2361,8 +2367,12 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2361 2367	atomic_inc(&head->node.refs);
2362 2368	spin_unlock(&delayed_refs->lock);
2363 2369
2364	btrfs_release_path(root->fs_info->extent_root, path);
2370	btrfs_release_path(path);
2365 2371
2372	/*
2373	 * Mutex was contended, block until it's released and let
2374	 * caller try again
2375	 */
2366 2376	mutex_lock(&head->mutex);
2367 2377	mutex_unlock(&head->mutex);
2368 2378	btrfs_put_delayed_ref(&head->node);
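The comments added in the two hunks above document the same idiom: no work is done while holding head->mutex, the lock/unlock pair only makes the caller sleep until whoever currently owns the delayed-ref head drops it, after which the lookup is retried. A minimal sketch of the pattern, reusing the names from the surrounding context (head, delayed_refs, the again: label); this is an illustration, not code from the patch:

	if (!mutex_trylock(&head->mutex)) {
		/* keep the head alive while we sleep */
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		/*
		 * Mutex was contended: lock + unlock does nothing under the
		 * lock, it only blocks until the current holder releases it.
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);

		btrfs_put_delayed_ref(&head->node);
		goto again;
	}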
@@ -2510,126 +2520,6 @@ out:
2510 2520	return ret;
2511 2521 }
2512 2522
2513#if 0
2514int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2515 struct extent_buffer *buf, u32 nr_extents)
2516{
2517 struct btrfs_key key;
2518 struct btrfs_file_extent_item *fi;
2519 u64 root_gen;
2520 u32 nritems;
2521 int i;
2522 int level;
2523 int ret = 0;
2524 int shared = 0;
2525
2526 if (!root->ref_cows)
2527 return 0;
2528
2529 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2530 shared = 0;
2531 root_gen = root->root_key.offset;
2532 } else {
2533 shared = 1;
2534 root_gen = trans->transid - 1;
2535 }
2536
2537 level = btrfs_header_level(buf);
2538 nritems = btrfs_header_nritems(buf);
2539
2540 if (level == 0) {
2541 struct btrfs_leaf_ref *ref;
2542 struct btrfs_extent_info *info;
2543
2544 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2545 if (!ref) {
2546 ret = -ENOMEM;
2547 goto out;
2548 }
2549
2550 ref->root_gen = root_gen;
2551 ref->bytenr = buf->start;
2552 ref->owner = btrfs_header_owner(buf);
2553 ref->generation = btrfs_header_generation(buf);
2554 ref->nritems = nr_extents;
2555 info = ref->extents;
2556
2557 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2558 u64 disk_bytenr;
2559 btrfs_item_key_to_cpu(buf, &key, i);
2560 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2561 continue;
2562 fi = btrfs_item_ptr(buf, i,
2563 struct btrfs_file_extent_item);
2564 if (btrfs_file_extent_type(buf, fi) ==
2565 BTRFS_FILE_EXTENT_INLINE)
2566 continue;
2567 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2568 if (disk_bytenr == 0)
2569 continue;
2570
2571 info->bytenr = disk_bytenr;
2572 info->num_bytes =
2573 btrfs_file_extent_disk_num_bytes(buf, fi);
2574 info->objectid = key.objectid;
2575 info->offset = key.offset;
2576 info++;
2577 }
2578
2579 ret = btrfs_add_leaf_ref(root, ref, shared);
2580 if (ret == -EEXIST && shared) {
2581 struct btrfs_leaf_ref *old;
2582 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2583 BUG_ON(!old);
2584 btrfs_remove_leaf_ref(root, old);
2585 btrfs_free_leaf_ref(root, old);
2586 ret = btrfs_add_leaf_ref(root, ref, shared);
2587 }
2588 WARN_ON(ret);
2589 btrfs_free_leaf_ref(root, ref);
2590 }
2591out:
2592 return ret;
2593}
2594
2595/* when a block goes through cow, we update the reference counts of
2596 * everything that block points to. The internal pointers of the block
2597 * can be in just about any order, and it is likely to have clusters of
2598 * things that are close together and clusters of things that are not.
2599 *
2600 * To help reduce the seeks that come with updating all of these reference
2601 * counts, sort them by byte number before actual updates are done.
2602 *
2603 * struct refsort is used to match byte number to slot in the btree block.
2604 * we sort based on the byte number and then use the slot to actually
2605 * find the item.
2606 *
2607 * struct refsort is smaller than struct btrfs_item and smaller than
2608 * struct btrfs_key_ptr. Since we're currently limited to the page size
2609 * for a btree block, there's no way for a kmalloc of refsorts for a
2610 * single node to be bigger than a page.
2611 */
2612struct refsort {
2613 u64 bytenr;
2614 u32 slot;
2615};
2616
2617/*
2618 * for passing into sort()
2619 */
2620static int refsort_cmp(const void *a_void, const void *b_void)
2621{
2622 const struct refsort *a = a_void;
2623 const struct refsort *b = b_void;
2624
2625 if (a->bytenr < b->bytenr)
2626 return -1;
2627 if (a->bytenr > b->bytenr)
2628 return 1;
2629 return 0;
2630}
2631#endif
2632
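The comment in the block removed above explains the refsort idea: record the byte number and btree slot of every reference, sort by byte number, then issue the reference-count updates in that order so the extent tree is walked roughly sequentially. A hedged sketch of how struct refsort and refsort_cmp were meant to be combined with the kernel's sort() from <linux/sort.h>; bytenr_for_slot() is a hypothetical stand-in for "read the byte number out of slot i", not a real helper:

	struct refsort *sorted;
	u32 i, refi = 0;

	sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
	if (!sorted)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		u64 bytenr = bytenr_for_slot(buf, i);	/* hypothetical */

		if (bytenr == 0)
			continue;
		sorted[refi].bytenr = bytenr;
		sorted[refi].slot = i;
		refi++;
	}
	/* sort by byte number, then process sorted[0..refi) in that order */
	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
	kfree(sorted);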
2633 2523 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2634 2524			   struct btrfs_root *root,
2635 2525			   struct extent_buffer *buf,
@@ -2732,7 +2622,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
2732 2622	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2733 2623	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2734 2624	btrfs_mark_buffer_dirty(leaf);
2735	btrfs_release_path(extent_root, path);
2625	btrfs_release_path(path);
2736 2626 fail:
2737 2627	if (ret)
2738 2628		return ret;
@@ -2785,7 +2675,7 @@ again:
2785 2675	inode = lookup_free_space_inode(root, block_group, path);
2786 2676	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2787 2677		ret = PTR_ERR(inode);
2788		btrfs_release_path(root, path);
2678		btrfs_release_path(path);
2789 2679		goto out;
2790 2680	}
2791 2681
@@ -2854,7 +2744,7 @@ again:
2854 2744 out_put:
2855 2745	iput(inode);
2856 2746 out_free:
2857	btrfs_release_path(root, path);
2747	btrfs_release_path(path);
2858 2748 out:
2859 2749	spin_lock(&block_group->lock);
2860 2750	block_group->disk_cache_state = dcs;
@@ -3144,7 +3034,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3144 3034	/* make sure bytes are sectorsize aligned */
3145 3035	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3146 3036
3147	if (root == root->fs_info->tree_root) {
3037	if (root == root->fs_info->tree_root ||
3038	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3148 3039		alloc_chunk = 0;
3149 3040		committed = 1;
3150 3041	}
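The untouched context line above, bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);, is the usual round-up-to-a-power-of-two idiom and relies on sectorsize being a power of two. A worked example assuming a 4096-byte sector size (typical, but chosen at mkfs time):

	/* sectorsize = 4096 = 0x1000, so the mask is ~0xFFFULL           */
	u64 bytes = 5000;
	bytes = (bytes + 4096 - 1) & ~((u64)4096 - 1);
	/* 5000 + 4095 = 9095 = 0x2387; 0x2387 & ~0xFFF = 0x2000 = 8192  */
	/* i.e. a 5000-byte reservation is charged as two full sectors    */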
@@ -3211,18 +3102,6 @@ commit_trans:
3211 3102		goto again;
3212 3103	}
3213 3104
3214#if 0 /* I hope we never need this code again, just in case */
3215 printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
3216 "%llu bytes_reserved, " "%llu bytes_pinned, "
3217 "%llu bytes_readonly, %llu may use %llu total\n",
3218 (unsigned long long)bytes,
3219 (unsigned long long)data_sinfo->bytes_used,
3220 (unsigned long long)data_sinfo->bytes_reserved,
3221 (unsigned long long)data_sinfo->bytes_pinned,
3222 (unsigned long long)data_sinfo->bytes_readonly,
3223 (unsigned long long)data_sinfo->bytes_may_use,
3224 (unsigned long long)data_sinfo->total_bytes);
3225#endif
3226 3105		return -ENOSPC;
3227 3106	}
3228 3107	data_sinfo->bytes_may_use += bytes;
@@ -3425,6 +3304,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
3425 3304	if (reserved == 0)
3426 3305		return 0;
3427 3306
3307	/* nothing to shrink - nothing to reclaim */
3308	if (root->fs_info->delalloc_bytes == 0)
3309		return 0;
3310
3428 3311	max_reclaim = min(reserved, to_reclaim);
3429 3312
3430 3313	while (loops < 1024) {
@@ -3651,8 +3534,8 @@ static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3651 3534	spin_unlock(&block_rsv->lock);
3652 3535 }
3653 3536
3654	void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3537	static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3655 3538				     struct btrfs_block_rsv *dest, u64 num_bytes)
3656 3539 {
3657 3540	struct btrfs_space_info *space_info = block_rsv->space_info;
3658 3541
@@ -3855,23 +3738,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3855 3738	u64 meta_used;
3856 3739	u64 data_used;
3857 3740	int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3858#if 0
3859 /*
3860 * per tree used space accounting can be inaccuracy, so we
3861 * can't rely on it.
3862 */
3863 spin_lock(&fs_info->extent_root->accounting_lock);
3864 num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
3865 spin_unlock(&fs_info->extent_root->accounting_lock);
3866
3867 spin_lock(&fs_info->csum_root->accounting_lock);
3868 num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
3869 spin_unlock(&fs_info->csum_root->accounting_lock);
3870 3741
3871 spin_lock(&fs_info->tree_root->accounting_lock);
3872 num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
3873 spin_unlock(&fs_info->tree_root->accounting_lock);
3874#endif
3875 3742	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3876 3743	spin_lock(&sinfo->lock);
3877 3744	data_used = sinfo->bytes_used;
@@ -3924,10 +3791,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3924 3791		block_rsv->reserved = block_rsv->size;
3925 3792		block_rsv->full = 1;
3926 3793	}
3927	#if 0
3794
3928	printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
3929		block_rsv->size, block_rsv->reserved);
3930	#endif
3931 3795	spin_unlock(&sinfo->lock);
3932 3796	spin_unlock(&block_rsv->lock);
3933 3797 }
@@ -3973,12 +3837,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3973 3837	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3974 3838 }
3975 3839
3976static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
3977{
3978 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3979 3 * num_items;
3980}
3981
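The static helper deleted above survives as btrfs_calc_trans_metadata_size(), which the renamed call sites below use. The formula reserves a worst-case COW path per item: one leaf plus one node for every remaining level (BTRFS_MAX_LEVEL is 8), multiplied by three as headroom (commonly explained as allowing for splits along the path). A worked example with assumed 4 KiB leaf and node sizes, noting that the real values are fixed at mkfs time:

	/* (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1)) * 3 * num_items */
	/* (4096     + 4096 * 7) * 3 * num_items                         */
	/* = 32768 * 3 * num_items = 98304 bytes, i.e. 96 KiB per item   */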
3982 3840 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3983 3841				 struct btrfs_root *root,
3984 3842				 int num_items)
@@ -3989,7 +3847,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3989 3847	if (num_items == 0 || root->fs_info->chunk_root == root)
3990 3848		return 0;
3991 3849
3992	num_bytes = calc_trans_metadata_size(root, num_items);
3850	num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
3993 3851	ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
3994 3852				  num_bytes);
3995 3853	if (!ret) {
@@ -4028,14 +3886,14 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4028 3886	 * If all of the metadata space is used, we can commit
4029 3887	 * transaction and use space it freed.
4030 3888	 */
4031	u64 num_bytes = calc_trans_metadata_size(root, 4);
3889	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
4032 3890	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4033 3891 }
4034 3892
4035 3893 void btrfs_orphan_release_metadata(struct inode *inode)
4036 3894 {
4037 3895	struct btrfs_root *root = BTRFS_I(inode)->root;
4038	u64 num_bytes = calc_trans_metadata_size(root, 4);
3896	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
4039 3897	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4040 3898 }
4041 3899
@@ -4049,7 +3907,7 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4049 3907	 * two for root back/forward refs, two for directory entries
4050 3908	 * and one for root of the snapshot.
4051 3909	 */
4052	u64 num_bytes = calc_trans_metadata_size(root, 5);
3910	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4053 3911	dst_rsv->space_info = src_rsv->space_info;
4054 3912	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4055 3913 }
@@ -4078,7 +3936,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4078 3936
4079 3937	if (nr_extents > reserved_extents) {
4080 3938		nr_extents -= reserved_extents;
4081		to_reserve = calc_trans_metadata_size(root, nr_extents);
3939		to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4082 3940	} else {
4083 3941		nr_extents = 0;
4084 3942		to_reserve = 0;
@@ -4132,7 +3990,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4132 3990
4133 3991	to_free = calc_csum_metadata_size(inode, num_bytes);
4134 3992	if (nr_extents > 0)
4135		to_free += calc_trans_metadata_size(root, nr_extents);
3993		to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
4136 3994
4137 3995	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4138 3996				to_free);
@@ -4541,7 +4399,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4541 4399					 NULL, refs_to_drop,
4542 4400					 is_data);
4543 4401		BUG_ON(ret);
4544		btrfs_release_path(extent_root, path);
4402		btrfs_release_path(path);
4545 4403		path->leave_spinning = 1;
4546 4404
4547 4405		key.objectid = bytenr;
@@ -4580,7 +4438,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4580 4438						 owner_objectid, 0);
4581 4439			BUG_ON(ret < 0);
4582 4440
4583			btrfs_release_path(extent_root, path);
4441			btrfs_release_path(path);
4584 4442			path->leave_spinning = 1;
4585 4443
4586 4444			key.objectid = bytenr;
@@ -4650,7 +4508,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4650 4508		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4651 4509				      num_to_del);
4652 4510		BUG_ON(ret);
4653		btrfs_release_path(extent_root, path);
4511		btrfs_release_path(path);
4654 4512
4655 4513		if (is_data) {
4656 4514			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
@@ -4893,7 +4751,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4893 4751		return 0;
4894 4752
4895 4753	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4896		   (cache->free_space >= num_bytes));
4754		   (cache->free_space_ctl->free_space >= num_bytes));
4897 4755
4898 4756	put_caching_control(caching_ctl);
4899 4757	return 0;
@@ -6480,7 +6338,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
6480 6338			trans->block_rsv = block_rsv;
6481 6339		}
6482 6340	}
6483	btrfs_release_path(root, path);
6341	btrfs_release_path(path);
6484 6342	BUG_ON(err);
6485 6343
6486 6344	ret = btrfs_del_root(trans, tree_root, &root->root_key);
@@ -6584,1514 +6442,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6584 6442	return ret;
6585 6443 }
6586 6444
6587#if 0
6588static unsigned long calc_ra(unsigned long start, unsigned long last,
6589 unsigned long nr)
6590{
6591 return min(last, start + nr - 1);
6592}
6593
6594static noinline int relocate_inode_pages(struct inode *inode, u64 start,
6595 u64 len)
6596{
6597 u64 page_start;
6598 u64 page_end;
6599 unsigned long first_index;
6600 unsigned long last_index;
6601 unsigned long i;
6602 struct page *page;
6603 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6604 struct file_ra_state *ra;
6605 struct btrfs_ordered_extent *ordered;
6606 unsigned int total_read = 0;
6607 unsigned int total_dirty = 0;
6608 int ret = 0;
6609
6610 ra = kzalloc(sizeof(*ra), GFP_NOFS);
6611 if (!ra)
6612 return -ENOMEM;
6613
6614 mutex_lock(&inode->i_mutex);
6615 first_index = start >> PAGE_CACHE_SHIFT;
6616 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
6617
6618	/* make sure the dirty trick played by the caller works */
6619 ret = invalidate_inode_pages2_range(inode->i_mapping,
6620 first_index, last_index);
6621 if (ret)
6622 goto out_unlock;
6623
6624 file_ra_state_init(ra, inode->i_mapping);
6625
6626 for (i = first_index ; i <= last_index; i++) {
6627 if (total_read % ra->ra_pages == 0) {
6628 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
6629 calc_ra(i, last_index, ra->ra_pages));
6630 }
6631 total_read++;
6632again:
6633 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
6634 BUG_ON(1);
6635 page = grab_cache_page(inode->i_mapping, i);
6636 if (!page) {
6637 ret = -ENOMEM;
6638 goto out_unlock;
6639 }
6640 if (!PageUptodate(page)) {
6641 btrfs_readpage(NULL, page);
6642 lock_page(page);
6643 if (!PageUptodate(page)) {
6644 unlock_page(page);
6645 page_cache_release(page);
6646 ret = -EIO;
6647 goto out_unlock;
6648 }
6649 }
6650 wait_on_page_writeback(page);
6651
6652 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
6653 page_end = page_start + PAGE_CACHE_SIZE - 1;
6654 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
6655
6656 ordered = btrfs_lookup_ordered_extent(inode, page_start);
6657 if (ordered) {
6658 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6659 unlock_page(page);
6660 page_cache_release(page);
6661 btrfs_start_ordered_extent(inode, ordered, 1);
6662 btrfs_put_ordered_extent(ordered);
6663 goto again;
6664 }
6665 set_page_extent_mapped(page);
6666
6667 if (i == first_index)
6668 set_extent_bits(io_tree, page_start, page_end,
6669 EXTENT_BOUNDARY, GFP_NOFS);
6670 btrfs_set_extent_delalloc(inode, page_start, page_end);
6671
6672 set_page_dirty(page);
6673 total_dirty++;
6674
6675 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6676 unlock_page(page);
6677 page_cache_release(page);
6678 }
6679
6680out_unlock:
6681 kfree(ra);
6682 mutex_unlock(&inode->i_mutex);
6683 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
6684 return ret;
6685}
6686
6687static noinline int relocate_data_extent(struct inode *reloc_inode,
6688 struct btrfs_key *extent_key,
6689 u64 offset)
6690{
6691 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6692 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
6693 struct extent_map *em;
6694 u64 start = extent_key->objectid - offset;
6695 u64 end = start + extent_key->offset - 1;
6696
6697 em = alloc_extent_map(GFP_NOFS);
6698 BUG_ON(!em);
6699
6700 em->start = start;
6701 em->len = extent_key->offset;
6702 em->block_len = extent_key->offset;
6703 em->block_start = extent_key->objectid;
6704 em->bdev = root->fs_info->fs_devices->latest_bdev;
6705 set_bit(EXTENT_FLAG_PINNED, &em->flags);
6706
6707 /* setup extent map to cheat btrfs_readpage */
6708 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6709 while (1) {
6710 int ret;
6711 write_lock(&em_tree->lock);
6712 ret = add_extent_mapping(em_tree, em);
6713 write_unlock(&em_tree->lock);
6714 if (ret != -EEXIST) {
6715 free_extent_map(em);
6716 break;
6717 }
6718 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
6719 }
6720 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6721
6722 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
6723}
6724
6725struct btrfs_ref_path {
6726 u64 extent_start;
6727 u64 nodes[BTRFS_MAX_LEVEL];
6728 u64 root_objectid;
6729 u64 root_generation;
6730 u64 owner_objectid;
6731 u32 num_refs;
6732 int lowest_level;
6733 int current_level;
6734 int shared_level;
6735
6736 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
6737 u64 new_nodes[BTRFS_MAX_LEVEL];
6738};
6739
6740struct disk_extent {
6741 u64 ram_bytes;
6742 u64 disk_bytenr;
6743 u64 disk_num_bytes;
6744 u64 offset;
6745 u64 num_bytes;
6746 u8 compression;
6747 u8 encryption;
6748 u16 other_encoding;
6749};
6750
6751static int is_cowonly_root(u64 root_objectid)
6752{
6753 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
6754 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
6755 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
6756 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
6757 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6758 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
6759 return 1;
6760 return 0;
6761}
6762
6763static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
6764 struct btrfs_root *extent_root,
6765 struct btrfs_ref_path *ref_path,
6766 int first_time)
6767{
6768 struct extent_buffer *leaf;
6769 struct btrfs_path *path;
6770 struct btrfs_extent_ref *ref;
6771 struct btrfs_key key;
6772 struct btrfs_key found_key;
6773 u64 bytenr;
6774 u32 nritems;
6775 int level;
6776 int ret = 1;
6777
6778 path = btrfs_alloc_path();
6779 if (!path)
6780 return -ENOMEM;
6781
6782 if (first_time) {
6783 ref_path->lowest_level = -1;
6784 ref_path->current_level = -1;
6785 ref_path->shared_level = -1;
6786 goto walk_up;
6787 }
6788walk_down:
6789 level = ref_path->current_level - 1;
6790 while (level >= -1) {
6791 u64 parent;
6792 if (level < ref_path->lowest_level)
6793 break;
6794
6795 if (level >= 0)
6796 bytenr = ref_path->nodes[level];
6797 else
6798 bytenr = ref_path->extent_start;
6799 BUG_ON(bytenr == 0);
6800
6801 parent = ref_path->nodes[level + 1];
6802 ref_path->nodes[level + 1] = 0;
6803 ref_path->current_level = level;
6804 BUG_ON(parent == 0);
6805
6806 key.objectid = bytenr;
6807 key.offset = parent + 1;
6808 key.type = BTRFS_EXTENT_REF_KEY;
6809
6810 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6811 if (ret < 0)
6812 goto out;
6813 BUG_ON(ret == 0);
6814
6815 leaf = path->nodes[0];
6816 nritems = btrfs_header_nritems(leaf);
6817 if (path->slots[0] >= nritems) {
6818 ret = btrfs_next_leaf(extent_root, path);
6819 if (ret < 0)
6820 goto out;
6821 if (ret > 0)
6822 goto next;
6823 leaf = path->nodes[0];
6824 }
6825
6826 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6827 if (found_key.objectid == bytenr &&
6828 found_key.type == BTRFS_EXTENT_REF_KEY) {
6829 if (level < ref_path->shared_level)
6830 ref_path->shared_level = level;
6831 goto found;
6832 }
6833next:
6834 level--;
6835 btrfs_release_path(extent_root, path);
6836 cond_resched();
6837 }
6838 /* reached lowest level */
6839 ret = 1;
6840 goto out;
6841walk_up:
6842 level = ref_path->current_level;
6843 while (level < BTRFS_MAX_LEVEL - 1) {
6844 u64 ref_objectid;
6845
6846 if (level >= 0)
6847 bytenr = ref_path->nodes[level];
6848 else
6849 bytenr = ref_path->extent_start;
6850
6851 BUG_ON(bytenr == 0);
6852
6853 key.objectid = bytenr;
6854 key.offset = 0;
6855 key.type = BTRFS_EXTENT_REF_KEY;
6856
6857 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6858 if (ret < 0)
6859 goto out;
6860
6861 leaf = path->nodes[0];
6862 nritems = btrfs_header_nritems(leaf);
6863 if (path->slots[0] >= nritems) {
6864 ret = btrfs_next_leaf(extent_root, path);
6865 if (ret < 0)
6866 goto out;
6867 if (ret > 0) {
6868 /* the extent was freed by someone */
6869 if (ref_path->lowest_level == level)
6870 goto out;
6871 btrfs_release_path(extent_root, path);
6872 goto walk_down;
6873 }
6874 leaf = path->nodes[0];
6875 }
6876
6877 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6878 if (found_key.objectid != bytenr ||
6879 found_key.type != BTRFS_EXTENT_REF_KEY) {
6880 /* the extent was freed by someone */
6881 if (ref_path->lowest_level == level) {
6882 ret = 1;
6883 goto out;
6884 }
6885 btrfs_release_path(extent_root, path);
6886 goto walk_down;
6887 }
6888found:
6889 ref = btrfs_item_ptr(leaf, path->slots[0],
6890 struct btrfs_extent_ref);
6891 ref_objectid = btrfs_ref_objectid(leaf, ref);
6892 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
6893 if (first_time) {
6894 level = (int)ref_objectid;
6895 BUG_ON(level >= BTRFS_MAX_LEVEL);
6896 ref_path->lowest_level = level;
6897 ref_path->current_level = level;
6898 ref_path->nodes[level] = bytenr;
6899 } else {
6900 WARN_ON(ref_objectid != level);
6901 }
6902 } else {
6903 WARN_ON(level != -1);
6904 }
6905 first_time = 0;
6906
6907 if (ref_path->lowest_level == level) {
6908 ref_path->owner_objectid = ref_objectid;
6909 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6910 }
6911
6912 /*
6913 * the block is tree root or the block isn't in reference
6914 * counted tree.
6915 */
6916 if (found_key.objectid == found_key.offset ||
6917 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6918 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6919 ref_path->root_generation =
6920 btrfs_ref_generation(leaf, ref);
6921 if (level < 0) {
6922 /* special reference from the tree log */
6923 ref_path->nodes[0] = found_key.offset;
6924 ref_path->current_level = 0;
6925 }
6926 ret = 0;
6927 goto out;
6928 }
6929
6930 level++;
6931 BUG_ON(ref_path->nodes[level] != 0);
6932 ref_path->nodes[level] = found_key.offset;
6933 ref_path->current_level = level;
6934
6935 /*
6936 * the reference was created in the running transaction,
6937 * no need to continue walking up.
6938 */
6939 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6940 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6941 ref_path->root_generation =
6942 btrfs_ref_generation(leaf, ref);
6943 ret = 0;
6944 goto out;
6945 }
6946
6947 btrfs_release_path(extent_root, path);
6948 cond_resched();
6949 }
6950 /* reached max tree level, but no tree root found. */
6951 BUG();
6952out:
6953 btrfs_free_path(path);
6954 return ret;
6955}
6956
6957static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6958 struct btrfs_root *extent_root,
6959 struct btrfs_ref_path *ref_path,
6960 u64 extent_start)
6961{
6962 memset(ref_path, 0, sizeof(*ref_path));
6963 ref_path->extent_start = extent_start;
6964
6965 return __next_ref_path(trans, extent_root, ref_path, 1);
6966}
6967
6968static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6969 struct btrfs_root *extent_root,
6970 struct btrfs_ref_path *ref_path)
6971{
6972 return __next_ref_path(trans, extent_root, ref_path, 0);
6973}
6974
6975static noinline int get_new_locations(struct inode *reloc_inode,
6976 struct btrfs_key *extent_key,
6977 u64 offset, int no_fragment,
6978 struct disk_extent **extents,
6979 int *nr_extents)
6980{
6981 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6982 struct btrfs_path *path;
6983 struct btrfs_file_extent_item *fi;
6984 struct extent_buffer *leaf;
6985 struct disk_extent *exts = *extents;
6986 struct btrfs_key found_key;
6987 u64 cur_pos;
6988 u64 last_byte;
6989 u32 nritems;
6990 int nr = 0;
6991 int max = *nr_extents;
6992 int ret;
6993
6994 WARN_ON(!no_fragment && *extents);
6995 if (!exts) {
6996 max = 1;
6997 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6998 if (!exts)
6999 return -ENOMEM;
7000 }
7001
7002 path = btrfs_alloc_path();
7003 if (!path) {
7004 if (exts != *extents)
7005 kfree(exts);
7006 return -ENOMEM;
7007 }
7008
7009 cur_pos = extent_key->objectid - offset;
7010 last_byte = extent_key->objectid + extent_key->offset;
7011 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
7012 cur_pos, 0);
7013 if (ret < 0)
7014 goto out;
7015 if (ret > 0) {
7016 ret = -ENOENT;
7017 goto out;
7018 }
7019
7020 while (1) {
7021 leaf = path->nodes[0];
7022 nritems = btrfs_header_nritems(leaf);
7023 if (path->slots[0] >= nritems) {
7024 ret = btrfs_next_leaf(root, path);
7025 if (ret < 0)
7026 goto out;
7027 if (ret > 0)
7028 break;
7029 leaf = path->nodes[0];
7030 }
7031
7032 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7033 if (found_key.offset != cur_pos ||
7034 found_key.type != BTRFS_EXTENT_DATA_KEY ||
7035 found_key.objectid != reloc_inode->i_ino)
7036 break;
7037
7038 fi = btrfs_item_ptr(leaf, path->slots[0],
7039 struct btrfs_file_extent_item);
7040 if (btrfs_file_extent_type(leaf, fi) !=
7041 BTRFS_FILE_EXTENT_REG ||
7042 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
7043 break;
7044
7045 if (nr == max) {
7046 struct disk_extent *old = exts;
7047 max *= 2;
7048 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
7049 if (!exts) {
7050 ret = -ENOMEM;
7051 goto out;
7052 }
7053 memcpy(exts, old, sizeof(*exts) * nr);
7054 if (old != *extents)
7055 kfree(old);
7056 }
7057
7058 exts[nr].disk_bytenr =
7059 btrfs_file_extent_disk_bytenr(leaf, fi);
7060 exts[nr].disk_num_bytes =
7061 btrfs_file_extent_disk_num_bytes(leaf, fi);
7062 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
7063 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7064 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7065 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
7066 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
7067 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
7068 fi);
7069 BUG_ON(exts[nr].offset > 0);
7070 BUG_ON(exts[nr].compression || exts[nr].encryption);
7071 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
7072
7073 cur_pos += exts[nr].num_bytes;
7074 nr++;
7075
7076 if (cur_pos + offset >= last_byte)
7077 break;
7078
7079 if (no_fragment) {
7080 ret = 1;
7081 goto out;
7082 }
7083 path->slots[0]++;
7084 }
7085
7086 BUG_ON(cur_pos + offset > last_byte);
7087 if (cur_pos + offset < last_byte) {
7088 ret = -ENOENT;
7089 goto out;
7090 }
7091 ret = 0;
7092out:
7093 btrfs_free_path(path);
7094 if (ret) {
7095 if (exts != *extents)
7096 kfree(exts);
7097 } else {
7098 *extents = exts;
7099 *nr_extents = nr;
7100 }
7101 return ret;
7102}
7103
7104static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
7105 struct btrfs_root *root,
7106 struct btrfs_path *path,
7107 struct btrfs_key *extent_key,
7108 struct btrfs_key *leaf_key,
7109 struct btrfs_ref_path *ref_path,
7110 struct disk_extent *new_extents,
7111 int nr_extents)
7112{
7113 struct extent_buffer *leaf;
7114 struct btrfs_file_extent_item *fi;
7115 struct inode *inode = NULL;
7116 struct btrfs_key key;
7117 u64 lock_start = 0;
7118 u64 lock_end = 0;
7119 u64 num_bytes;
7120 u64 ext_offset;
7121 u64 search_end = (u64)-1;
7122 u32 nritems;
7123 int nr_scaned = 0;
7124 int extent_locked = 0;
7125 int extent_type;
7126 int ret;
7127
7128 memcpy(&key, leaf_key, sizeof(key));
7129 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
7130 if (key.objectid < ref_path->owner_objectid ||
7131 (key.objectid == ref_path->owner_objectid &&
7132 key.type < BTRFS_EXTENT_DATA_KEY)) {
7133 key.objectid = ref_path->owner_objectid;
7134 key.type = BTRFS_EXTENT_DATA_KEY;
7135 key.offset = 0;
7136 }
7137 }
7138
7139 while (1) {
7140 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
7141 if (ret < 0)
7142 goto out;
7143
7144 leaf = path->nodes[0];
7145 nritems = btrfs_header_nritems(leaf);
7146next:
7147 if (extent_locked && ret > 0) {
7148 /*
7149 * the file extent item was modified by someone
7150 * before the extent got locked.
7151 */
7152 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7153 lock_end, GFP_NOFS);
7154 extent_locked = 0;
7155 }
7156
7157 if (path->slots[0] >= nritems) {
7158 if (++nr_scaned > 2)
7159 break;
7160
7161 BUG_ON(extent_locked);
7162 ret = btrfs_next_leaf(root, path);
7163 if (ret < 0)
7164 goto out;
7165 if (ret > 0)
7166 break;
7167 leaf = path->nodes[0];
7168 nritems = btrfs_header_nritems(leaf);
7169 }
7170
7171 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7172
7173 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
7174 if ((key.objectid > ref_path->owner_objectid) ||
7175 (key.objectid == ref_path->owner_objectid &&
7176 key.type > BTRFS_EXTENT_DATA_KEY) ||
7177 key.offset >= search_end)
7178 break;
7179 }
7180
7181 if (inode && key.objectid != inode->i_ino) {
7182 BUG_ON(extent_locked);
7183 btrfs_release_path(root, path);
7184 mutex_unlock(&inode->i_mutex);
7185 iput(inode);
7186 inode = NULL;
7187 continue;
7188 }
7189
7190 if (key.type != BTRFS_EXTENT_DATA_KEY) {
7191 path->slots[0]++;
7192 ret = 1;
7193 goto next;
7194 }
7195 fi = btrfs_item_ptr(leaf, path->slots[0],
7196 struct btrfs_file_extent_item);
7197 extent_type = btrfs_file_extent_type(leaf, fi);
7198 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
7199 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
7200 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
7201 extent_key->objectid)) {
7202 path->slots[0]++;
7203 ret = 1;
7204 goto next;
7205 }
7206
7207 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7208 ext_offset = btrfs_file_extent_offset(leaf, fi);
7209
7210 if (search_end == (u64)-1) {
7211 search_end = key.offset - ext_offset +
7212 btrfs_file_extent_ram_bytes(leaf, fi);
7213 }
7214
7215 if (!extent_locked) {
7216 lock_start = key.offset;
7217 lock_end = lock_start + num_bytes - 1;
7218 } else {
7219 if (lock_start > key.offset ||
7220 lock_end + 1 < key.offset + num_bytes) {
7221 unlock_extent(&BTRFS_I(inode)->io_tree,
7222 lock_start, lock_end, GFP_NOFS);
7223 extent_locked = 0;
7224 }
7225 }
7226
7227 if (!inode) {
7228 btrfs_release_path(root, path);
7229
7230 inode = btrfs_iget_locked(root->fs_info->sb,
7231 key.objectid, root);
7232 if (inode->i_state & I_NEW) {
7233 BTRFS_I(inode)->root = root;
7234 BTRFS_I(inode)->location.objectid =
7235 key.objectid;
7236 BTRFS_I(inode)->location.type =
7237 BTRFS_INODE_ITEM_KEY;
7238 BTRFS_I(inode)->location.offset = 0;
7239 btrfs_read_locked_inode(inode);
7240 unlock_new_inode(inode);
7241 }
7242 /*
7243 * some code call btrfs_commit_transaction while
7244 * holding the i_mutex, so we can't use mutex_lock
7245 * here.
7246 */
7247 if (is_bad_inode(inode) ||
7248 !mutex_trylock(&inode->i_mutex)) {
7249 iput(inode);
7250 inode = NULL;
7251 key.offset = (u64)-1;
7252 goto skip;
7253 }
7254 }
7255
7256 if (!extent_locked) {
7257 struct btrfs_ordered_extent *ordered;
7258
7259 btrfs_release_path(root, path);
7260
7261 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7262 lock_end, GFP_NOFS);
7263 ordered = btrfs_lookup_first_ordered_extent(inode,
7264 lock_end);
7265 if (ordered &&
7266 ordered->file_offset <= lock_end &&
7267 ordered->file_offset + ordered->len > lock_start) {
7268 unlock_extent(&BTRFS_I(inode)->io_tree,
7269 lock_start, lock_end, GFP_NOFS);
7270 btrfs_start_ordered_extent(inode, ordered, 1);
7271 btrfs_put_ordered_extent(ordered);
7272 key.offset += num_bytes;
7273 goto skip;
7274 }
7275 if (ordered)
7276 btrfs_put_ordered_extent(ordered);
7277
7278 extent_locked = 1;
7279 continue;
7280 }
7281
7282 if (nr_extents == 1) {
7283 /* update extent pointer in place */
7284 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7285 new_extents[0].disk_bytenr);
7286 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7287 new_extents[0].disk_num_bytes);
7288 btrfs_mark_buffer_dirty(leaf);
7289
7290 btrfs_drop_extent_cache(inode, key.offset,
7291 key.offset + num_bytes - 1, 0);
7292
7293 ret = btrfs_inc_extent_ref(trans, root,
7294 new_extents[0].disk_bytenr,
7295 new_extents[0].disk_num_bytes,
7296 leaf->start,
7297 root->root_key.objectid,
7298 trans->transid,
7299 key.objectid);
7300 BUG_ON(ret);
7301
7302 ret = btrfs_free_extent(trans, root,
7303 extent_key->objectid,
7304 extent_key->offset,
7305 leaf->start,
7306 btrfs_header_owner(leaf),
7307 btrfs_header_generation(leaf),
7308 key.objectid, 0);
7309 BUG_ON(ret);
7310
7311 btrfs_release_path(root, path);
7312 key.offset += num_bytes;
7313 } else {
7314 BUG_ON(1);
7315#if 0
7316 u64 alloc_hint;
7317 u64 extent_len;
7318 int i;
7319 /*
7320 * drop old extent pointer at first, then insert the
7321			 * new pointers one by one
7322 */
7323 btrfs_release_path(root, path);
7324 ret = btrfs_drop_extents(trans, root, inode, key.offset,
7325 key.offset + num_bytes,
7326 key.offset, &alloc_hint);
7327 BUG_ON(ret);
7328
7329 for (i = 0; i < nr_extents; i++) {
7330 if (ext_offset >= new_extents[i].num_bytes) {
7331 ext_offset -= new_extents[i].num_bytes;
7332 continue;
7333 }
7334 extent_len = min(new_extents[i].num_bytes -
7335 ext_offset, num_bytes);
7336
7337 ret = btrfs_insert_empty_item(trans, root,
7338 path, &key,
7339 sizeof(*fi));
7340 BUG_ON(ret);
7341
7342 leaf = path->nodes[0];
7343 fi = btrfs_item_ptr(leaf, path->slots[0],
7344 struct btrfs_file_extent_item);
7345 btrfs_set_file_extent_generation(leaf, fi,
7346 trans->transid);
7347 btrfs_set_file_extent_type(leaf, fi,
7348 BTRFS_FILE_EXTENT_REG);
7349 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7350 new_extents[i].disk_bytenr);
7351 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7352 new_extents[i].disk_num_bytes);
7353 btrfs_set_file_extent_ram_bytes(leaf, fi,
7354 new_extents[i].ram_bytes);
7355
7356 btrfs_set_file_extent_compression(leaf, fi,
7357 new_extents[i].compression);
7358 btrfs_set_file_extent_encryption(leaf, fi,
7359 new_extents[i].encryption);
7360 btrfs_set_file_extent_other_encoding(leaf, fi,
7361 new_extents[i].other_encoding);
7362
7363 btrfs_set_file_extent_num_bytes(leaf, fi,
7364 extent_len);
7365 ext_offset += new_extents[i].offset;
7366 btrfs_set_file_extent_offset(leaf, fi,
7367 ext_offset);
7368 btrfs_mark_buffer_dirty(leaf);
7369
7370 btrfs_drop_extent_cache(inode, key.offset,
7371 key.offset + extent_len - 1, 0);
7372
7373 ret = btrfs_inc_extent_ref(trans, root,
7374 new_extents[i].disk_bytenr,
7375 new_extents[i].disk_num_bytes,
7376 leaf->start,
7377 root->root_key.objectid,
7378 trans->transid, key.objectid);
7379 BUG_ON(ret);
7380 btrfs_release_path(root, path);
7381
7382 inode_add_bytes(inode, extent_len);
7383
7384 ext_offset = 0;
7385 num_bytes -= extent_len;
7386 key.offset += extent_len;
7387
7388 if (num_bytes == 0)
7389 break;
7390 }
7391 BUG_ON(i >= nr_extents);
7392#endif
7393 }
7394
7395 if (extent_locked) {
7396 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7397 lock_end, GFP_NOFS);
7398 extent_locked = 0;
7399 }
7400skip:
7401 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
7402 key.offset >= search_end)
7403 break;
7404
7405 cond_resched();
7406 }
7407 ret = 0;
7408out:
7409 btrfs_release_path(root, path);
7410 if (inode) {
7411 mutex_unlock(&inode->i_mutex);
7412 if (extent_locked) {
7413 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7414 lock_end, GFP_NOFS);
7415 }
7416 iput(inode);
7417 }
7418 return ret;
7419}
7420
7421int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
7422 struct btrfs_root *root,
7423 struct extent_buffer *buf, u64 orig_start)
7424{
7425 int level;
7426 int ret;
7427
7428 BUG_ON(btrfs_header_generation(buf) != trans->transid);
7429 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7430
7431 level = btrfs_header_level(buf);
7432 if (level == 0) {
7433 struct btrfs_leaf_ref *ref;
7434 struct btrfs_leaf_ref *orig_ref;
7435
7436 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
7437 if (!orig_ref)
7438 return -ENOENT;
7439
7440 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
7441 if (!ref) {
7442 btrfs_free_leaf_ref(root, orig_ref);
7443 return -ENOMEM;
7444 }
7445
7446 ref->nritems = orig_ref->nritems;
7447 memcpy(ref->extents, orig_ref->extents,
7448 sizeof(ref->extents[0]) * ref->nritems);
7449
7450 btrfs_free_leaf_ref(root, orig_ref);
7451
7452 ref->root_gen = trans->transid;
7453 ref->bytenr = buf->start;
7454 ref->owner = btrfs_header_owner(buf);
7455 ref->generation = btrfs_header_generation(buf);
7456
7457 ret = btrfs_add_leaf_ref(root, ref, 0);
7458 WARN_ON(ret);
7459 btrfs_free_leaf_ref(root, ref);
7460 }
7461 return 0;
7462}
7463
7464static noinline int invalidate_extent_cache(struct btrfs_root *root,
7465 struct extent_buffer *leaf,
7466 struct btrfs_block_group_cache *group,
7467 struct btrfs_root *target_root)
7468{
7469 struct btrfs_key key;
7470 struct inode *inode = NULL;
7471 struct btrfs_file_extent_item *fi;
7472 struct extent_state *cached_state = NULL;
7473 u64 num_bytes;
7474 u64 skip_objectid = 0;
7475 u32 nritems;
7476 u32 i;
7477
7478 nritems = btrfs_header_nritems(leaf);
7479 for (i = 0; i < nritems; i++) {
7480 btrfs_item_key_to_cpu(leaf, &key, i);
7481 if (key.objectid == skip_objectid ||
7482 key.type != BTRFS_EXTENT_DATA_KEY)
7483 continue;
7484 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7485 if (btrfs_file_extent_type(leaf, fi) ==
7486 BTRFS_FILE_EXTENT_INLINE)
7487 continue;
7488 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
7489 continue;
7490 if (!inode || inode->i_ino != key.objectid) {
7491 iput(inode);
7492 inode = btrfs_ilookup(target_root->fs_info->sb,
7493 key.objectid, target_root, 1);
7494 }
7495 if (!inode) {
7496 skip_objectid = key.objectid;
7497 continue;
7498 }
7499 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7500
7501 lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
7502 key.offset + num_bytes - 1, 0, &cached_state,
7503 GFP_NOFS);
7504 btrfs_drop_extent_cache(inode, key.offset,
7505 key.offset + num_bytes - 1, 1);
7506 unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
7507 key.offset + num_bytes - 1, &cached_state,
7508 GFP_NOFS);
7509 cond_resched();
7510 }
7511 iput(inode);
7512 return 0;
7513}
7514
7515static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
7516 struct btrfs_root *root,
7517 struct extent_buffer *leaf,
7518 struct btrfs_block_group_cache *group,
7519 struct inode *reloc_inode)
7520{
7521 struct btrfs_key key;
7522 struct btrfs_key extent_key;
7523 struct btrfs_file_extent_item *fi;
7524 struct btrfs_leaf_ref *ref;
7525 struct disk_extent *new_extent;
7526 u64 bytenr;
7527 u64 num_bytes;
7528 u32 nritems;
7529 u32 i;
7530 int ext_index;
7531 int nr_extent;
7532 int ret;
7533
7534 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
7535 if (!new_extent)
7536 return -ENOMEM;
7537
7538 ref = btrfs_lookup_leaf_ref(root, leaf->start);
7539 BUG_ON(!ref);
7540
7541 ext_index = -1;
7542 nritems = btrfs_header_nritems(leaf);
7543 for (i = 0; i < nritems; i++) {
7544 btrfs_item_key_to_cpu(leaf, &key, i);
7545 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
7546 continue;
7547 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7548 if (btrfs_file_extent_type(leaf, fi) ==
7549 BTRFS_FILE_EXTENT_INLINE)
7550 continue;
7551 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7552 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
7553 if (bytenr == 0)
7554 continue;
7555
7556 ext_index++;
7557 if (bytenr >= group->key.objectid + group->key.offset ||
7558 bytenr + num_bytes <= group->key.objectid)
7559 continue;
7560
7561 extent_key.objectid = bytenr;
7562 extent_key.offset = num_bytes;
7563 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
7564 nr_extent = 1;
7565 ret = get_new_locations(reloc_inode, &extent_key,
7566 group->key.objectid, 1,
7567 &new_extent, &nr_extent);
7568 if (ret > 0)
7569 continue;
7570 BUG_ON(ret < 0);
7571
7572 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
7573 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
7574 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
7575 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
7576
7577 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7578 new_extent->disk_bytenr);
7579 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7580 new_extent->disk_num_bytes);
7581 btrfs_mark_buffer_dirty(leaf);
7582
7583 ret = btrfs_inc_extent_ref(trans, root,
7584 new_extent->disk_bytenr,
7585 new_extent->disk_num_bytes,
7586 leaf->start,
7587 root->root_key.objectid,
7588 trans->transid, key.objectid);
7589 BUG_ON(ret);
7590
7591 ret = btrfs_free_extent(trans, root,
7592 bytenr, num_bytes, leaf->start,
7593 btrfs_header_owner(leaf),
7594 btrfs_header_generation(leaf),
7595 key.objectid, 0);
7596 BUG_ON(ret);
7597 cond_resched();
7598 }
7599 kfree(new_extent);
7600 BUG_ON(ext_index + 1 != ref->nritems);
7601 btrfs_free_leaf_ref(root, ref);
7602 return 0;
7603}
7604
7605int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
7606 struct btrfs_root *root)
7607{
7608 struct btrfs_root *reloc_root;
7609 int ret;
7610
7611 if (root->reloc_root) {
7612 reloc_root = root->reloc_root;
7613 root->reloc_root = NULL;
7614 list_add(&reloc_root->dead_list,
7615 &root->fs_info->dead_reloc_roots);
7616
7617 btrfs_set_root_bytenr(&reloc_root->root_item,
7618 reloc_root->node->start);
7619 btrfs_set_root_level(&root->root_item,
7620 btrfs_header_level(reloc_root->node));
7621 memset(&reloc_root->root_item.drop_progress, 0,
7622 sizeof(struct btrfs_disk_key));
7623 reloc_root->root_item.drop_level = 0;
7624
7625 ret = btrfs_update_root(trans, root->fs_info->tree_root,
7626 &reloc_root->root_key,
7627 &reloc_root->root_item);
7628 BUG_ON(ret);
7629 }
7630 return 0;
7631}
7632
7633int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
7634{
7635 struct btrfs_trans_handle *trans;
7636 struct btrfs_root *reloc_root;
7637 struct btrfs_root *prev_root = NULL;
7638 struct list_head dead_roots;
7639 int ret;
7640 unsigned long nr;
7641
7642 INIT_LIST_HEAD(&dead_roots);
7643 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
7644
7645 while (!list_empty(&dead_roots)) {
7646 reloc_root = list_entry(dead_roots.prev,
7647 struct btrfs_root, dead_list);
7648 list_del_init(&reloc_root->dead_list);
7649
7650 BUG_ON(reloc_root->commit_root != NULL);
7651 while (1) {
7652 trans = btrfs_join_transaction(root, 1);
7653 BUG_ON(IS_ERR(trans));
7654
7655 mutex_lock(&root->fs_info->drop_mutex);
7656 ret = btrfs_drop_snapshot(trans, reloc_root);
7657 if (ret != -EAGAIN)
7658 break;
7659 mutex_unlock(&root->fs_info->drop_mutex);
7660
7661 nr = trans->blocks_used;
7662 ret = btrfs_end_transaction(trans, root);
7663 BUG_ON(ret);
7664 btrfs_btree_balance_dirty(root, nr);
7665 }
7666
7667 free_extent_buffer(reloc_root->node);
7668
7669 ret = btrfs_del_root(trans, root->fs_info->tree_root,
7670 &reloc_root->root_key);
7671 BUG_ON(ret);
7672 mutex_unlock(&root->fs_info->drop_mutex);
7673
7674 nr = trans->blocks_used;
7675 ret = btrfs_end_transaction(trans, root);
7676 BUG_ON(ret);
7677 btrfs_btree_balance_dirty(root, nr);
7678
7679 kfree(prev_root);
7680 prev_root = reloc_root;
7681 }
7682 if (prev_root) {
7683 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
7684 kfree(prev_root);
7685 }
7686 return 0;
7687}
7688
7689int btrfs_add_dead_reloc_root(struct btrfs_root *root)
7690{
7691 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
7692 return 0;
7693}
7694
7695int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
7696{
7697 struct btrfs_root *reloc_root;
7698 struct btrfs_trans_handle *trans;
7699 struct btrfs_key location;
7700 int found;
7701 int ret;
7702
7703 mutex_lock(&root->fs_info->tree_reloc_mutex);
7704 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
7705 BUG_ON(ret);
7706 found = !list_empty(&root->fs_info->dead_reloc_roots);
7707 mutex_unlock(&root->fs_info->tree_reloc_mutex);
7708
7709 if (found) {
7710 trans = btrfs_start_transaction(root, 1);
7711 BUG_ON(IS_ERR(trans));
7712 ret = btrfs_commit_transaction(trans, root);
7713 BUG_ON(ret);
7714 }
7715
7716 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
7717 location.offset = (u64)-1;
7718 location.type = BTRFS_ROOT_ITEM_KEY;
7719
7720 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
7721 BUG_ON(!reloc_root);
7722 ret = btrfs_orphan_cleanup(reloc_root);
7723 BUG_ON(ret);
7724 return 0;
7725}
7726
7727static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
7728 struct btrfs_root *root)
7729{
7730 struct btrfs_root *reloc_root;
7731 struct extent_buffer *eb;
7732 struct btrfs_root_item *root_item;
7733 struct btrfs_key root_key;
7734 int ret;
7735
7736 BUG_ON(!root->ref_cows);
7737 if (root->reloc_root)
7738 return 0;
7739
7740 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
7741 if (!root_item)
7742 return -ENOMEM;
7743
7744 ret = btrfs_copy_root(trans, root, root->commit_root,
7745 &eb, BTRFS_TREE_RELOC_OBJECTID);
7746 BUG_ON(ret);
7747
7748 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
7749 root_key.offset = root->root_key.objectid;
7750 root_key.type = BTRFS_ROOT_ITEM_KEY;
7751
7752 memcpy(root_item, &root->root_item, sizeof(root_item));
7753 btrfs_set_root_refs(root_item, 0);
7754 btrfs_set_root_bytenr(root_item, eb->start);
7755 btrfs_set_root_level(root_item, btrfs_header_level(eb));
7756 btrfs_set_root_generation(root_item, trans->transid);
7757
7758 btrfs_tree_unlock(eb);
7759 free_extent_buffer(eb);
7760
7761 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
7762 &root_key, root_item);
7763 BUG_ON(ret);
7764 kfree(root_item);
7765
7766 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
7767 &root_key);
7768 BUG_ON(IS_ERR(reloc_root));
7769 reloc_root->last_trans = trans->transid;
7770 reloc_root->commit_root = NULL;
7771 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
7772
7773 root->reloc_root = reloc_root;
7774 return 0;
7775}
7776
7777/*
7778 * Core function of space balance.
7779 *
7780 * The idea is using reloc trees to relocate tree blocks in reference
7781 * counted roots. There is one reloc tree for each subvol, and all
7782 * reloc trees share same root key objectid. Reloc trees are snapshots
7783 * of the latest committed roots of subvols (root->commit_root).
7784 *
7785 * To relocate a tree block referenced by a subvol, there are two steps.
7786 * COW the block through subvol's reloc tree, then update block pointer
7787 * in the subvol to point to the new block. Since all reloc trees share
7788 * same root key objectid, doing special handling for tree blocks owned
7789 * by them is easy. Once a tree block has been COWed in one reloc tree,
7790 * we can use the resulting new block directly when the same block is
7791 * required to COW again through other reloc trees. By this way, relocated
7792 * tree blocks are shared between reloc trees, so they are also shared
7793 * between subvols.
7794 */
7795static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
7796 struct btrfs_root *root,
7797 struct btrfs_path *path,
7798 struct btrfs_key *first_key,
7799 struct btrfs_ref_path *ref_path,
7800 struct btrfs_block_group_cache *group,
7801 struct inode *reloc_inode)
7802{
7803 struct btrfs_root *reloc_root;
7804 struct extent_buffer *eb = NULL;
7805 struct btrfs_key *keys;
7806 u64 *nodes;
7807 int level;
7808 int shared_level;
7809 int lowest_level = 0;
7810 int ret;
7811
7812 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
7813 lowest_level = ref_path->owner_objectid;
7814
7815 if (!root->ref_cows) {
7816 path->lowest_level = lowest_level;
7817 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
7818 BUG_ON(ret < 0);
7819 path->lowest_level = 0;
7820 btrfs_release_path(root, path);
7821 return 0;
7822 }
7823
7824 mutex_lock(&root->fs_info->tree_reloc_mutex);
7825 ret = init_reloc_tree(trans, root);
7826 BUG_ON(ret);
7827 reloc_root = root->reloc_root;
7828
7829 shared_level = ref_path->shared_level;
7830 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
7831
7832 keys = ref_path->node_keys;
7833 nodes = ref_path->new_nodes;
7834 memset(&keys[shared_level + 1], 0,
7835 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
7836 memset(&nodes[shared_level + 1], 0,
7837 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
7838
7839 if (nodes[lowest_level] == 0) {
7840 path->lowest_level = lowest_level;
7841 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7842 0, 1);
7843 BUG_ON(ret);
7844 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
7845 eb = path->nodes[level];
7846 if (!eb || eb == reloc_root->node)
7847 break;
7848 nodes[level] = eb->start;
7849 if (level == 0)
7850 btrfs_item_key_to_cpu(eb, &keys[level], 0);
7851 else
7852 btrfs_node_key_to_cpu(eb, &keys[level], 0);
7853 }
7854 if (nodes[0] &&
7855 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7856 eb = path->nodes[0];
7857 ret = replace_extents_in_leaf(trans, reloc_root, eb,
7858 group, reloc_inode);
7859 BUG_ON(ret);
7860 }
7861 btrfs_release_path(reloc_root, path);
7862 } else {
7863 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
7864 lowest_level);
7865 BUG_ON(ret);
7866 }
7867
7868 /*
7869 * replace tree blocks in the fs tree with tree blocks in
7870 * the reloc tree.
7871 */
7872 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
7873 BUG_ON(ret < 0);
7874
7875 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7876 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7877 0, 0);
7878 BUG_ON(ret);
7879 extent_buffer_get(path->nodes[0]);
7880 eb = path->nodes[0];
7881 btrfs_release_path(reloc_root, path);
7882 ret = invalidate_extent_cache(reloc_root, eb, group, root);
7883 BUG_ON(ret);
7884 free_extent_buffer(eb);
7885 }
7886
7887 mutex_unlock(&root->fs_info->tree_reloc_mutex);
7888 path->lowest_level = 0;
7889 return 0;
7890}
7891
7892static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
7893 struct btrfs_root *root,
7894 struct btrfs_path *path,
7895 struct btrfs_key *first_key,
7896 struct btrfs_ref_path *ref_path)
7897{
7898 int ret;
7899
7900 ret = relocate_one_path(trans, root, path, first_key,
7901 ref_path, NULL, NULL);
7902 BUG_ON(ret);
7903
7904 return 0;
7905}
7906
7907static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
7908 struct btrfs_root *extent_root,
7909 struct btrfs_path *path,
7910 struct btrfs_key *extent_key)
7911{
7912 int ret;
7913
7914 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
7915 if (ret)
7916 goto out;
7917 ret = btrfs_del_item(trans, extent_root, path);
7918out:
7919 btrfs_release_path(extent_root, path);
7920 return ret;
7921}
7922
7923static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
7924 struct btrfs_ref_path *ref_path)
7925{
7926 struct btrfs_key root_key;
7927
7928 root_key.objectid = ref_path->root_objectid;
7929 root_key.type = BTRFS_ROOT_ITEM_KEY;
7930 if (is_cowonly_root(ref_path->root_objectid))
7931 root_key.offset = 0;
7932 else
7933 root_key.offset = (u64)-1;
7934
7935 return btrfs_read_fs_root_no_name(fs_info, &root_key);
7936}
7937
7938static noinline int relocate_one_extent(struct btrfs_root *extent_root,
7939 struct btrfs_path *path,
7940 struct btrfs_key *extent_key,
7941 struct btrfs_block_group_cache *group,
7942 struct inode *reloc_inode, int pass)
7943{
7944 struct btrfs_trans_handle *trans;
7945 struct btrfs_root *found_root;
7946 struct btrfs_ref_path *ref_path = NULL;
7947 struct disk_extent *new_extents = NULL;
7948 int nr_extents = 0;
7949 int loops;
7950 int ret;
7951 int level;
7952 struct btrfs_key first_key;
7953 u64 prev_block = 0;
7954
7955
7956 trans = btrfs_start_transaction(extent_root, 1);
7957 BUG_ON(IS_ERR(trans));
7958
7959 if (extent_key->objectid == 0) {
7960 ret = del_extent_zero(trans, extent_root, path, extent_key);
7961 goto out;
7962 }
7963
7964 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
7965 if (!ref_path) {
7966 ret = -ENOMEM;
7967 goto out;
7968 }
7969
7970 for (loops = 0; ; loops++) {
7971 if (loops == 0) {
7972 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
7973 extent_key->objectid);
7974 } else {
7975 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
7976 }
7977 if (ret < 0)
7978 goto out;
7979 if (ret > 0)
7980 break;
7981
7982 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
7983 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
7984 continue;
7985
7986 found_root = read_ref_root(extent_root->fs_info, ref_path);
7987 BUG_ON(!found_root);
7988 /*
 7989		 * for a reference-counted tree, only process reference paths
7990 * rooted at the latest committed root.
7991 */
7992 if (found_root->ref_cows &&
7993 ref_path->root_generation != found_root->root_key.offset)
7994 continue;
7995
7996 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7997 if (pass == 0) {
7998 /*
7999 * copy data extents to new locations
8000 */
8001 u64 group_start = group->key.objectid;
8002 ret = relocate_data_extent(reloc_inode,
8003 extent_key,
8004 group_start);
8005 if (ret < 0)
8006 goto out;
8007 break;
8008 }
8009 level = 0;
8010 } else {
8011 level = ref_path->owner_objectid;
8012 }
8013
8014 if (prev_block != ref_path->nodes[level]) {
8015 struct extent_buffer *eb;
8016 u64 block_start = ref_path->nodes[level];
8017 u64 block_size = btrfs_level_size(found_root, level);
8018
8019 eb = read_tree_block(found_root, block_start,
8020 block_size, 0);
8021 if (!eb) {
8022 ret = -EIO;
8023 goto out;
8024 }
8025 btrfs_tree_lock(eb);
8026 BUG_ON(level != btrfs_header_level(eb));
8027
8028 if (level == 0)
8029 btrfs_item_key_to_cpu(eb, &first_key, 0);
8030 else
8031 btrfs_node_key_to_cpu(eb, &first_key, 0);
8032
8033 btrfs_tree_unlock(eb);
8034 free_extent_buffer(eb);
8035 prev_block = block_start;
8036 }
8037
8038 mutex_lock(&extent_root->fs_info->trans_mutex);
8039 btrfs_record_root_in_trans(found_root);
8040 mutex_unlock(&extent_root->fs_info->trans_mutex);
8041 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
8042 /*
8043 * try to update data extent references while
8044 * keeping metadata shared between snapshots.
8045 */
8046 if (pass == 1) {
8047 ret = relocate_one_path(trans, found_root,
8048 path, &first_key, ref_path,
8049 group, reloc_inode);
8050 if (ret < 0)
8051 goto out;
8052 continue;
8053 }
8054 /*
 8055			 * use the fallback method to process the remaining
8056 * references.
8057 */
8058 if (!new_extents) {
8059 u64 group_start = group->key.objectid;
8060 new_extents = kmalloc(sizeof(*new_extents),
8061 GFP_NOFS);
8062 if (!new_extents) {
8063 ret = -ENOMEM;
8064 goto out;
8065 }
8066 nr_extents = 1;
8067 ret = get_new_locations(reloc_inode,
8068 extent_key,
8069 group_start, 1,
8070 &new_extents,
8071 &nr_extents);
8072 if (ret)
8073 goto out;
8074 }
8075 ret = replace_one_extent(trans, found_root,
8076 path, extent_key,
8077 &first_key, ref_path,
8078 new_extents, nr_extents);
8079 } else {
8080 ret = relocate_tree_block(trans, found_root, path,
8081 &first_key, ref_path);
8082 }
8083 if (ret < 0)
8084 goto out;
8085 }
8086 ret = 0;
8087out:
8088 btrfs_end_transaction(trans, extent_root);
8089 kfree(new_extents);
8090 kfree(ref_path);
8091 return ret;
8092}
8093#endif
8094
8095static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) 6445static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8096{ 6446{
8097 u64 num_devices; 6447 u64 num_devices;
@@ -8555,10 +6905,16 @@ int btrfs_read_block_groups(struct btrfs_root *root)
8555 ret = -ENOMEM; 6905 ret = -ENOMEM;
8556 goto error; 6906 goto error;
8557 } 6907 }
6908 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
6909 GFP_NOFS);
6910 if (!cache->free_space_ctl) {
6911 kfree(cache);
6912 ret = -ENOMEM;
6913 goto error;
6914 }
8558 6915
8559 atomic_set(&cache->count, 1); 6916 atomic_set(&cache->count, 1);
8560 spin_lock_init(&cache->lock); 6917 spin_lock_init(&cache->lock);
8561 spin_lock_init(&cache->tree_lock);
8562 cache->fs_info = info; 6918 cache->fs_info = info;
8563 INIT_LIST_HEAD(&cache->list); 6919 INIT_LIST_HEAD(&cache->list);
8564 INIT_LIST_HEAD(&cache->cluster_list); 6920 INIT_LIST_HEAD(&cache->cluster_list);
@@ -8566,24 +6922,18 @@ int btrfs_read_block_groups(struct btrfs_root *root)
8566 if (need_clear) 6922 if (need_clear)
8567 cache->disk_cache_state = BTRFS_DC_CLEAR; 6923 cache->disk_cache_state = BTRFS_DC_CLEAR;
8568 6924
8569 /*
8570 * we only want to have 32k of ram per block group for keeping
8571 * track of free space, and if we pass 1/2 of that we want to
8572 * start converting things over to using bitmaps
8573 */
8574 cache->extents_thresh = ((1024 * 32) / 2) /
8575 sizeof(struct btrfs_free_space);
8576
8577 read_extent_buffer(leaf, &cache->item, 6925 read_extent_buffer(leaf, &cache->item,
8578 btrfs_item_ptr_offset(leaf, path->slots[0]), 6926 btrfs_item_ptr_offset(leaf, path->slots[0]),
8579 sizeof(cache->item)); 6927 sizeof(cache->item));
8580 memcpy(&cache->key, &found_key, sizeof(found_key)); 6928 memcpy(&cache->key, &found_key, sizeof(found_key));
8581 6929
8582 key.objectid = found_key.objectid + found_key.offset; 6930 key.objectid = found_key.objectid + found_key.offset;
8583 btrfs_release_path(root, path); 6931 btrfs_release_path(path);
8584 cache->flags = btrfs_block_group_flags(&cache->item); 6932 cache->flags = btrfs_block_group_flags(&cache->item);
8585 cache->sectorsize = root->sectorsize; 6933 cache->sectorsize = root->sectorsize;
8586 6934
6935 btrfs_init_free_space_ctl(cache);
6936
8587 /* 6937 /*
8588 * We need to exclude the super stripes now so that the space 6938 * We need to exclude the super stripes now so that the space
8589 * info has super bytes accounted for, otherwise we'll think 6939 * info has super bytes accounted for, otherwise we'll think
@@ -8670,6 +7020,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8670 cache = kzalloc(sizeof(*cache), GFP_NOFS); 7020 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8671 if (!cache) 7021 if (!cache)
8672 return -ENOMEM; 7022 return -ENOMEM;
7023 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7024 GFP_NOFS);
7025 if (!cache->free_space_ctl) {
7026 kfree(cache);
7027 return -ENOMEM;
7028 }
8673 7029
8674 cache->key.objectid = chunk_offset; 7030 cache->key.objectid = chunk_offset;
8675 cache->key.offset = size; 7031 cache->key.offset = size;
@@ -8677,19 +7033,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8677 cache->sectorsize = root->sectorsize; 7033 cache->sectorsize = root->sectorsize;
8678 cache->fs_info = root->fs_info; 7034 cache->fs_info = root->fs_info;
8679 7035
8680 /*
8681 * we only want to have 32k of ram per block group for keeping track
8682 * of free space, and if we pass 1/2 of that we want to start
8683 * converting things over to using bitmaps
8684 */
8685 cache->extents_thresh = ((1024 * 32) / 2) /
8686 sizeof(struct btrfs_free_space);
8687 atomic_set(&cache->count, 1); 7036 atomic_set(&cache->count, 1);
8688 spin_lock_init(&cache->lock); 7037 spin_lock_init(&cache->lock);
8689 spin_lock_init(&cache->tree_lock);
8690 INIT_LIST_HEAD(&cache->list); 7038 INIT_LIST_HEAD(&cache->list);
8691 INIT_LIST_HEAD(&cache->cluster_list); 7039 INIT_LIST_HEAD(&cache->cluster_list);
8692 7040
7041 btrfs_init_free_space_ctl(cache);
7042
8693 btrfs_set_block_group_used(&cache->item, bytes_used); 7043 btrfs_set_block_group_used(&cache->item, bytes_used);
8694 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); 7044 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8695 cache->flags = type; 7045 cache->flags = type;
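The two hunks above add the same setup to btrfs_read_block_groups() and btrfs_make_block_group(); a hedged sketch of that shared pattern follows (the helper name is hypothetical, the calls are the ones shown above):

/* Hedged sketch, not part of the commit. */
static struct btrfs_block_group_cache *alloc_block_group_cache(void)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;

	/* free-space bookkeeping now lives in a separately allocated ctl */
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	/* replaces the open-coded extents_thresh math removed above */
	btrfs_init_free_space_ctl(cache);

	return cache;
}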
@@ -8802,12 +7152,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8802 if (ret < 0) 7152 if (ret < 0)
8803 goto out; 7153 goto out;
8804 if (ret > 0) 7154 if (ret > 0)
8805 btrfs_release_path(tree_root, path); 7155 btrfs_release_path(path);
8806 if (ret == 0) { 7156 if (ret == 0) {
8807 ret = btrfs_del_item(trans, tree_root, path); 7157 ret = btrfs_del_item(trans, tree_root, path);
8808 if (ret) 7158 if (ret)
8809 goto out; 7159 goto out;
8810 btrfs_release_path(tree_root, path); 7160 btrfs_release_path(path);
8811 } 7161 }
8812 7162
8813 spin_lock(&root->fs_info->block_group_cache_lock); 7163 spin_lock(&root->fs_info->block_group_cache_lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4f9893243dae..c5d9fbb92bc3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -103,7 +103,7 @@ void extent_io_exit(void)
103} 103}
104 104
105void extent_io_tree_init(struct extent_io_tree *tree, 105void extent_io_tree_init(struct extent_io_tree *tree,
106 struct address_space *mapping, gfp_t mask) 106 struct address_space *mapping)
107{ 107{
108 tree->state = RB_ROOT; 108 tree->state = RB_ROOT;
109 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC); 109 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
@@ -441,6 +441,15 @@ static int clear_state_bit(struct extent_io_tree *tree,
441 return ret; 441 return ret;
442} 442}
443 443
444static struct extent_state *
445alloc_extent_state_atomic(struct extent_state *prealloc)
446{
447 if (!prealloc)
448 prealloc = alloc_extent_state(GFP_ATOMIC);
449
450 return prealloc;
451}
452
444/* 453/*
445 * clear some bits on a range in the tree. This may require splitting 454 * clear some bits on a range in the tree. This may require splitting
446 * or inserting elements in the tree, so the gfp mask is used to 455 * or inserting elements in the tree, so the gfp mask is used to
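A hedged sketch of how the new alloc_extent_state_atomic() helper above is intended to be used: preallocate with the caller's mask while sleeping is still allowed, then refill atomically under the tree lock. The caller shape below is assumed for illustration, not copied from the commit:

/* Hedged usage sketch, not part of the commit. */
static void example_with_prealloc(struct extent_io_tree *tree, gfp_t mask)
{
	struct extent_state *prealloc = NULL;

	if (mask & __GFP_WAIT)
		prealloc = alloc_extent_state(mask);	/* may sleep */

	spin_lock(&tree->lock);
	prealloc = alloc_extent_state_atomic(prealloc);	/* GFP_ATOMIC refill if needed */
	BUG_ON(!prealloc);
	/* ... split_state()/insert_state() would consume prealloc here ... */
	spin_unlock(&tree->lock);

	if (prealloc)
		free_extent_state(prealloc);
}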
@@ -531,8 +540,8 @@ hit_next:
531 */ 540 */
532 541
533 if (state->start < start) { 542 if (state->start < start) {
534 if (!prealloc) 543 prealloc = alloc_extent_state_atomic(prealloc);
535 prealloc = alloc_extent_state(GFP_ATOMIC); 544 BUG_ON(!prealloc);
536 err = split_state(tree, state, prealloc, start); 545 err = split_state(tree, state, prealloc, start);
537 BUG_ON(err == -EEXIST); 546 BUG_ON(err == -EEXIST);
538 prealloc = NULL; 547 prealloc = NULL;
@@ -553,8 +562,8 @@ hit_next:
553 * on the first half 562 * on the first half
554 */ 563 */
555 if (state->start <= end && state->end > end) { 564 if (state->start <= end && state->end > end) {
556 if (!prealloc) 565 prealloc = alloc_extent_state_atomic(prealloc);
557 prealloc = alloc_extent_state(GFP_ATOMIC); 566 BUG_ON(!prealloc);
558 err = split_state(tree, state, prealloc, end + 1); 567 err = split_state(tree, state, prealloc, end + 1);
559 BUG_ON(err == -EEXIST); 568 BUG_ON(err == -EEXIST);
560 if (wake) 569 if (wake)
@@ -727,8 +736,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
727again: 736again:
728 if (!prealloc && (mask & __GFP_WAIT)) { 737 if (!prealloc && (mask & __GFP_WAIT)) {
729 prealloc = alloc_extent_state(mask); 738 prealloc = alloc_extent_state(mask);
730 if (!prealloc) 739 BUG_ON(!prealloc);
731 return -ENOMEM;
732 } 740 }
733 741
734 spin_lock(&tree->lock); 742 spin_lock(&tree->lock);
@@ -745,6 +753,8 @@ again:
745 */ 753 */
746 node = tree_search(tree, start); 754 node = tree_search(tree, start);
747 if (!node) { 755 if (!node) {
756 prealloc = alloc_extent_state_atomic(prealloc);
757 BUG_ON(!prealloc);
748 err = insert_state(tree, prealloc, start, end, &bits); 758 err = insert_state(tree, prealloc, start, end, &bits);
749 prealloc = NULL; 759 prealloc = NULL;
750 BUG_ON(err == -EEXIST); 760 BUG_ON(err == -EEXIST);
@@ -773,20 +783,18 @@ hit_next:
773 if (err) 783 if (err)
774 goto out; 784 goto out;
775 785
786 next_node = rb_next(node);
776 cache_state(state, cached_state); 787 cache_state(state, cached_state);
777 merge_state(tree, state); 788 merge_state(tree, state);
778 if (last_end == (u64)-1) 789 if (last_end == (u64)-1)
779 goto out; 790 goto out;
780 791
781 start = last_end + 1; 792 start = last_end + 1;
782 if (start < end && prealloc && !need_resched()) { 793 if (next_node && start < end && prealloc && !need_resched()) {
783 next_node = rb_next(node); 794 state = rb_entry(next_node, struct extent_state,
784 if (next_node) { 795 rb_node);
785 state = rb_entry(next_node, struct extent_state, 796 if (state->start == start)
786 rb_node); 797 goto hit_next;
787 if (state->start == start)
788 goto hit_next;
789 }
790 } 798 }
791 goto search_again; 799 goto search_again;
792 } 800 }
@@ -813,6 +821,9 @@ hit_next:
813 err = -EEXIST; 821 err = -EEXIST;
814 goto out; 822 goto out;
815 } 823 }
824
825 prealloc = alloc_extent_state_atomic(prealloc);
826 BUG_ON(!prealloc);
816 err = split_state(tree, state, prealloc, start); 827 err = split_state(tree, state, prealloc, start);
817 BUG_ON(err == -EEXIST); 828 BUG_ON(err == -EEXIST);
818 prealloc = NULL; 829 prealloc = NULL;
@@ -843,14 +854,25 @@ hit_next:
843 this_end = end; 854 this_end = end;
844 else 855 else
845 this_end = last_start - 1; 856 this_end = last_start - 1;
857
858 prealloc = alloc_extent_state_atomic(prealloc);
859 BUG_ON(!prealloc);
860
861 /*
 862			 * Avoid freeing 'prealloc' if it can be merged with
863 * the later extent.
864 */
865 atomic_inc(&prealloc->refs);
846 err = insert_state(tree, prealloc, start, this_end, 866 err = insert_state(tree, prealloc, start, this_end,
847 &bits); 867 &bits);
848 BUG_ON(err == -EEXIST); 868 BUG_ON(err == -EEXIST);
849 if (err) { 869 if (err) {
870 free_extent_state(prealloc);
850 prealloc = NULL; 871 prealloc = NULL;
851 goto out; 872 goto out;
852 } 873 }
853 cache_state(prealloc, cached_state); 874 cache_state(prealloc, cached_state);
875 free_extent_state(prealloc);
854 prealloc = NULL; 876 prealloc = NULL;
855 start = this_end + 1; 877 start = this_end + 1;
856 goto search_again; 878 goto search_again;
@@ -867,6 +889,9 @@ hit_next:
867 err = -EEXIST; 889 err = -EEXIST;
868 goto out; 890 goto out;
869 } 891 }
892
893 prealloc = alloc_extent_state_atomic(prealloc);
894 BUG_ON(!prealloc);
870 err = split_state(tree, state, prealloc, end + 1); 895 err = split_state(tree, state, prealloc, end + 1);
871 BUG_ON(err == -EEXIST); 896 BUG_ON(err == -EEXIST);
872 897
@@ -943,13 +968,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
943 NULL, mask); 968 NULL, mask);
944} 969}
945 970
946static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
947 gfp_t mask)
948{
949 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
950 NULL, mask);
951}
952
953int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 971int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
954 struct extent_state **cached_state, gfp_t mask) 972 struct extent_state **cached_state, gfp_t mask)
955{ 973{
@@ -965,11 +983,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
965 cached_state, mask); 983 cached_state, mask);
966} 984}
967 985
968int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
969{
970 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
971}
972
973/* 986/*
974 * either insert or lock state struct between start and end use mask to tell 987 * either insert or lock state struct between start and end use mask to tell
975 * us if waiting is desired. 988 * us if waiting is desired.
@@ -1030,25 +1043,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1030} 1043}
1031 1044
1032/* 1045/*
1033 * helper function to set pages and extents in the tree dirty
1034 */
1035int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
1036{
1037 unsigned long index = start >> PAGE_CACHE_SHIFT;
1038 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1039 struct page *page;
1040
1041 while (index <= end_index) {
1042 page = find_get_page(tree->mapping, index);
1043 BUG_ON(!page);
1044 __set_page_dirty_nobuffers(page);
1045 page_cache_release(page);
1046 index++;
1047 }
1048 return 0;
1049}
1050
1051/*
1052 * helper function to set both pages and extents in the tree writeback 1046 * helper function to set both pages and extents in the tree writeback
1053 */ 1047 */
1054static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) 1048static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -1821,46 +1815,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
1821 bio_put(bio); 1815 bio_put(bio);
1822} 1816}
1823 1817
1824/*
1825 * IO done from prepare_write is pretty simple, we just unlock
1826 * the structs in the extent tree when done, and set the uptodate bits
1827 * as appropriate.
1828 */
1829static void end_bio_extent_preparewrite(struct bio *bio, int err)
1830{
1831 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1832 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1833 struct extent_io_tree *tree;
1834 u64 start;
1835 u64 end;
1836
1837 do {
1838 struct page *page = bvec->bv_page;
1839 struct extent_state *cached = NULL;
1840 tree = &BTRFS_I(page->mapping->host)->io_tree;
1841
1842 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1843 bvec->bv_offset;
1844 end = start + bvec->bv_len - 1;
1845
1846 if (--bvec >= bio->bi_io_vec)
1847 prefetchw(&bvec->bv_page->flags);
1848
1849 if (uptodate) {
1850 set_extent_uptodate(tree, start, end, &cached,
1851 GFP_ATOMIC);
1852 } else {
1853 ClearPageUptodate(page);
1854 SetPageError(page);
1855 }
1856
1857 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1858
1859 } while (bvec >= bio->bi_io_vec);
1860
1861 bio_put(bio);
1862}
1863
1864struct bio * 1818struct bio *
1865btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, 1819btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1866 gfp_t gfp_flags) 1820 gfp_t gfp_flags)
@@ -2009,7 +1963,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2009 struct btrfs_ordered_extent *ordered; 1963 struct btrfs_ordered_extent *ordered;
2010 int ret; 1964 int ret;
2011 int nr = 0; 1965 int nr = 0;
2012 size_t page_offset = 0; 1966 size_t pg_offset = 0;
2013 size_t iosize; 1967 size_t iosize;
2014 size_t disk_io_size; 1968 size_t disk_io_size;
2015 size_t blocksize = inode->i_sb->s_blocksize; 1969 size_t blocksize = inode->i_sb->s_blocksize;
@@ -2052,9 +2006,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2052 char *userpage; 2006 char *userpage;
2053 struct extent_state *cached = NULL; 2007 struct extent_state *cached = NULL;
2054 2008
2055 iosize = PAGE_CACHE_SIZE - page_offset; 2009 iosize = PAGE_CACHE_SIZE - pg_offset;
2056 userpage = kmap_atomic(page, KM_USER0); 2010 userpage = kmap_atomic(page, KM_USER0);
2057 memset(userpage + page_offset, 0, iosize); 2011 memset(userpage + pg_offset, 0, iosize);
2058 flush_dcache_page(page); 2012 flush_dcache_page(page);
2059 kunmap_atomic(userpage, KM_USER0); 2013 kunmap_atomic(userpage, KM_USER0);
2060 set_extent_uptodate(tree, cur, cur + iosize - 1, 2014 set_extent_uptodate(tree, cur, cur + iosize - 1,
@@ -2063,9 +2017,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2063 &cached, GFP_NOFS); 2017 &cached, GFP_NOFS);
2064 break; 2018 break;
2065 } 2019 }
2066 em = get_extent(inode, page, page_offset, cur, 2020 em = get_extent(inode, page, pg_offset, cur,
2067 end - cur + 1, 0); 2021 end - cur + 1, 0);
2068 if (IS_ERR(em) || !em) { 2022 if (IS_ERR_OR_NULL(em)) {
2069 SetPageError(page); 2023 SetPageError(page);
2070 unlock_extent(tree, cur, end, GFP_NOFS); 2024 unlock_extent(tree, cur, end, GFP_NOFS);
2071 break; 2025 break;
@@ -2103,7 +2057,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2103 struct extent_state *cached = NULL; 2057 struct extent_state *cached = NULL;
2104 2058
2105 userpage = kmap_atomic(page, KM_USER0); 2059 userpage = kmap_atomic(page, KM_USER0);
2106 memset(userpage + page_offset, 0, iosize); 2060 memset(userpage + pg_offset, 0, iosize);
2107 flush_dcache_page(page); 2061 flush_dcache_page(page);
2108 kunmap_atomic(userpage, KM_USER0); 2062 kunmap_atomic(userpage, KM_USER0);
2109 2063
@@ -2112,7 +2066,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2112 unlock_extent_cached(tree, cur, cur + iosize - 1, 2066 unlock_extent_cached(tree, cur, cur + iosize - 1,
2113 &cached, GFP_NOFS); 2067 &cached, GFP_NOFS);
2114 cur = cur + iosize; 2068 cur = cur + iosize;
2115 page_offset += iosize; 2069 pg_offset += iosize;
2116 continue; 2070 continue;
2117 } 2071 }
2118 /* the get_extent function already copied into the page */ 2072 /* the get_extent function already copied into the page */
@@ -2121,7 +2075,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2121 check_page_uptodate(tree, page); 2075 check_page_uptodate(tree, page);
2122 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2076 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2123 cur = cur + iosize; 2077 cur = cur + iosize;
2124 page_offset += iosize; 2078 pg_offset += iosize;
2125 continue; 2079 continue;
2126 } 2080 }
2127 /* we have an inline extent but it didn't get marked up 2081 /* we have an inline extent but it didn't get marked up
@@ -2131,7 +2085,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2131 SetPageError(page); 2085 SetPageError(page);
2132 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2086 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2133 cur = cur + iosize; 2087 cur = cur + iosize;
2134 page_offset += iosize; 2088 pg_offset += iosize;
2135 continue; 2089 continue;
2136 } 2090 }
2137 2091
@@ -2144,7 +2098,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2144 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; 2098 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2145 pnr -= page->index; 2099 pnr -= page->index;
2146 ret = submit_extent_page(READ, tree, page, 2100 ret = submit_extent_page(READ, tree, page,
2147 sector, disk_io_size, page_offset, 2101 sector, disk_io_size, pg_offset,
2148 bdev, bio, pnr, 2102 bdev, bio, pnr,
2149 end_bio_extent_readpage, mirror_num, 2103 end_bio_extent_readpage, mirror_num,
2150 *bio_flags, 2104 *bio_flags,
@@ -2155,7 +2109,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2155 if (ret) 2109 if (ret)
2156 SetPageError(page); 2110 SetPageError(page);
2157 cur = cur + iosize; 2111 cur = cur + iosize;
2158 page_offset += iosize; 2112 pg_offset += iosize;
2159 } 2113 }
2160out: 2114out:
2161 if (!nr) { 2115 if (!nr) {
@@ -2351,7 +2305,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2351 } 2305 }
2352 em = epd->get_extent(inode, page, pg_offset, cur, 2306 em = epd->get_extent(inode, page, pg_offset, cur,
2353 end - cur + 1, 1); 2307 end - cur + 1, 1);
2354 if (IS_ERR(em) || !em) { 2308 if (IS_ERR_OR_NULL(em)) {
2355 SetPageError(page); 2309 SetPageError(page);
2356 break; 2310 break;
2357 } 2311 }
@@ -2730,128 +2684,6 @@ int extent_invalidatepage(struct extent_io_tree *tree,
2730} 2684}
2731 2685
2732/* 2686/*
2733 * simple commit_write call, set_range_dirty is used to mark both
2734 * the pages and the extent records as dirty
2735 */
2736int extent_commit_write(struct extent_io_tree *tree,
2737 struct inode *inode, struct page *page,
2738 unsigned from, unsigned to)
2739{
2740 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2741
2742 set_page_extent_mapped(page);
2743 set_page_dirty(page);
2744
2745 if (pos > inode->i_size) {
2746 i_size_write(inode, pos);
2747 mark_inode_dirty(inode);
2748 }
2749 return 0;
2750}
2751
2752int extent_prepare_write(struct extent_io_tree *tree,
2753 struct inode *inode, struct page *page,
2754 unsigned from, unsigned to, get_extent_t *get_extent)
2755{
2756 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2757 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2758 u64 block_start;
2759 u64 orig_block_start;
2760 u64 block_end;
2761 u64 cur_end;
2762 struct extent_map *em;
2763 unsigned blocksize = 1 << inode->i_blkbits;
2764 size_t page_offset = 0;
2765 size_t block_off_start;
2766 size_t block_off_end;
2767 int err = 0;
2768 int iocount = 0;
2769 int ret = 0;
2770 int isnew;
2771
2772 set_page_extent_mapped(page);
2773
2774 block_start = (page_start + from) & ~((u64)blocksize - 1);
2775 block_end = (page_start + to - 1) | (blocksize - 1);
2776 orig_block_start = block_start;
2777
2778 lock_extent(tree, page_start, page_end, GFP_NOFS);
2779 while (block_start <= block_end) {
2780 em = get_extent(inode, page, page_offset, block_start,
2781 block_end - block_start + 1, 1);
2782 if (IS_ERR(em) || !em)
2783 goto err;
2784
2785 cur_end = min(block_end, extent_map_end(em) - 1);
2786 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2787 block_off_end = block_off_start + blocksize;
2788 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2789
2790 if (!PageUptodate(page) && isnew &&
2791 (block_off_end > to || block_off_start < from)) {
2792 void *kaddr;
2793
2794 kaddr = kmap_atomic(page, KM_USER0);
2795 if (block_off_end > to)
2796 memset(kaddr + to, 0, block_off_end - to);
2797 if (block_off_start < from)
2798 memset(kaddr + block_off_start, 0,
2799 from - block_off_start);
2800 flush_dcache_page(page);
2801 kunmap_atomic(kaddr, KM_USER0);
2802 }
2803 if ((em->block_start != EXTENT_MAP_HOLE &&
2804 em->block_start != EXTENT_MAP_INLINE) &&
2805 !isnew && !PageUptodate(page) &&
2806 (block_off_end > to || block_off_start < from) &&
2807 !test_range_bit(tree, block_start, cur_end,
2808 EXTENT_UPTODATE, 1, NULL)) {
2809 u64 sector;
2810 u64 extent_offset = block_start - em->start;
2811 size_t iosize;
2812 sector = (em->block_start + extent_offset) >> 9;
2813 iosize = (cur_end - block_start + blocksize) &
2814 ~((u64)blocksize - 1);
2815 /*
2816 * we've already got the extent locked, but we
2817 * need to split the state such that our end_bio
2818 * handler can clear the lock.
2819 */
2820 set_extent_bit(tree, block_start,
2821 block_start + iosize - 1,
2822 EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
2823 ret = submit_extent_page(READ, tree, page,
2824 sector, iosize, page_offset, em->bdev,
2825 NULL, 1,
2826 end_bio_extent_preparewrite, 0,
2827 0, 0);
2828 if (ret && !err)
2829 err = ret;
2830 iocount++;
2831 block_start = block_start + iosize;
2832 } else {
2833 struct extent_state *cached = NULL;
2834
2835 set_extent_uptodate(tree, block_start, cur_end, &cached,
2836 GFP_NOFS);
2837 unlock_extent_cached(tree, block_start, cur_end,
2838 &cached, GFP_NOFS);
2839 block_start = cur_end + 1;
2840 }
2841 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2842 free_extent_map(em);
2843 }
2844 if (iocount) {
2845 wait_extent_bit(tree, orig_block_start,
2846 block_end, EXTENT_LOCKED);
2847 }
2848 check_page_uptodate(tree, page);
2849err:
2850 /* FIXME, zero out newly allocated blocks on error */
2851 return err;
2852}
2853
2854/*
2855 * a helper for releasepage, this tests for areas of the page that 2687 * a helper for releasepage, this tests for areas of the page that
2856 * are locked or under IO and drops the related state bits if it is safe 2688 * are locked or under IO and drops the related state bits if it is safe
2857 * to drop the page. 2689 * to drop the page.
@@ -2909,7 +2741,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
2909 len = end - start + 1; 2741 len = end - start + 1;
2910 write_lock(&map->lock); 2742 write_lock(&map->lock);
2911 em = lookup_extent_mapping(map, start, len); 2743 em = lookup_extent_mapping(map, start, len);
2912 if (!em || IS_ERR(em)) { 2744 if (IS_ERR_OR_NULL(em)) {
2913 write_unlock(&map->lock); 2745 write_unlock(&map->lock);
2914 break; 2746 break;
2915 } 2747 }
@@ -2937,33 +2769,6 @@ int try_release_extent_mapping(struct extent_map_tree *map,
2937 return try_release_extent_state(map, tree, page, mask); 2769 return try_release_extent_state(map, tree, page, mask);
2938} 2770}
2939 2771
2940sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2941 get_extent_t *get_extent)
2942{
2943 struct inode *inode = mapping->host;
2944 struct extent_state *cached_state = NULL;
2945 u64 start = iblock << inode->i_blkbits;
2946 sector_t sector = 0;
2947 size_t blksize = (1 << inode->i_blkbits);
2948 struct extent_map *em;
2949
2950 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2951 0, &cached_state, GFP_NOFS);
2952 em = get_extent(inode, NULL, 0, start, blksize, 0);
2953 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
2954 start + blksize - 1, &cached_state, GFP_NOFS);
2955 if (!em || IS_ERR(em))
2956 return 0;
2957
2958 if (em->block_start > EXTENT_MAP_LAST_BYTE)
2959 goto out;
2960
2961 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2962out:
2963 free_extent_map(em);
2964 return sector;
2965}
2966
2967/* 2772/*
2968 * helper function for fiemap, which doesn't want to see any holes. 2773 * helper function for fiemap, which doesn't want to see any holes.
2969 * This maps until we find something past 'last' 2774 * This maps until we find something past 'last'
@@ -2986,7 +2791,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
2986 break; 2791 break;
2987 len = (len + sectorsize - 1) & ~(sectorsize - 1); 2792 len = (len + sectorsize - 1) & ~(sectorsize - 1);
2988 em = get_extent(inode, NULL, 0, offset, len, 0); 2793 em = get_extent(inode, NULL, 0, offset, len, 0);
2989 if (!em || IS_ERR(em)) 2794 if (IS_ERR_OR_NULL(em))
2990 return em; 2795 return em;
2991 2796
2992 /* if this isn't a hole return it */ 2797 /* if this isn't a hole return it */
@@ -3040,7 +2845,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3040 * because there might be preallocation past i_size 2845 * because there might be preallocation past i_size
3041 */ 2846 */
3042 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, 2847 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3043 path, inode->i_ino, -1, 0); 2848 path, btrfs_ino(inode), -1, 0);
3044 if (ret < 0) { 2849 if (ret < 0) {
3045 btrfs_free_path(path); 2850 btrfs_free_path(path);
3046 return ret; 2851 return ret;
@@ -3053,7 +2858,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3053 found_type = btrfs_key_type(&found_key); 2858 found_type = btrfs_key_type(&found_key);
3054 2859
3055 /* No extents, but there might be delalloc bits */ 2860 /* No extents, but there might be delalloc bits */
3056 if (found_key.objectid != inode->i_ino || 2861 if (found_key.objectid != btrfs_ino(inode) ||
3057 found_type != BTRFS_EXTENT_DATA_KEY) { 2862 found_type != BTRFS_EXTENT_DATA_KEY) {
3058 /* have to trust i_size as the end */ 2863 /* have to trust i_size as the end */
3059 last = (u64)-1; 2864 last = (u64)-1;
@@ -3276,8 +3081,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3276 3081
3277struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, 3082struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3278 u64 start, unsigned long len, 3083 u64 start, unsigned long len,
3279 struct page *page0, 3084 struct page *page0)
3280 gfp_t mask)
3281{ 3085{
3282 unsigned long num_pages = num_extent_pages(start, len); 3086 unsigned long num_pages = num_extent_pages(start, len);
3283 unsigned long i; 3087 unsigned long i;
@@ -3298,7 +3102,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3298 } 3102 }
3299 rcu_read_unlock(); 3103 rcu_read_unlock();
3300 3104
3301 eb = __alloc_extent_buffer(tree, start, len, mask); 3105 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
3302 if (!eb) 3106 if (!eb)
3303 return NULL; 3107 return NULL;
3304 3108
@@ -3315,7 +3119,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3315 i = 0; 3119 i = 0;
3316 } 3120 }
3317 for (; i < num_pages; i++, index++) { 3121 for (; i < num_pages; i++, index++) {
3318 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM); 3122 p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
3319 if (!p) { 3123 if (!p) {
3320 WARN_ON(1); 3124 WARN_ON(1);
3321 goto free_eb; 3125 goto free_eb;
@@ -3387,8 +3191,7 @@ free_eb:
3387} 3191}
3388 3192
3389struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, 3193struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3390 u64 start, unsigned long len, 3194 u64 start, unsigned long len)
3391 gfp_t mask)
3392{ 3195{
3393 struct extent_buffer *eb; 3196 struct extent_buffer *eb;
3394 3197
@@ -3449,13 +3252,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3449 return 0; 3252 return 0;
3450} 3253}
3451 3254
3452int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3453 struct extent_buffer *eb)
3454{
3455 return wait_on_extent_writeback(tree, eb->start,
3456 eb->start + eb->len - 1);
3457}
3458
3459int set_extent_buffer_dirty(struct extent_io_tree *tree, 3255int set_extent_buffer_dirty(struct extent_io_tree *tree,
3460 struct extent_buffer *eb) 3256 struct extent_buffer *eb)
3461{ 3257{
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index af2d7179c372..4e8445a4757c 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -153,23 +153,14 @@ static inline int extent_compress_type(unsigned long bio_flags)
153 153
154struct extent_map_tree; 154struct extent_map_tree;
155 155
156static inline struct extent_state *extent_state_next(struct extent_state *state)
157{
158 struct rb_node *node;
159 node = rb_next(&state->rb_node);
160 if (!node)
161 return NULL;
162 return rb_entry(node, struct extent_state, rb_node);
163}
164
165typedef struct extent_map *(get_extent_t)(struct inode *inode, 156typedef struct extent_map *(get_extent_t)(struct inode *inode,
166 struct page *page, 157 struct page *page,
167 size_t page_offset, 158 size_t pg_offset,
168 u64 start, u64 len, 159 u64 start, u64 len,
169 int create); 160 int create);
170 161
171void extent_io_tree_init(struct extent_io_tree *tree, 162void extent_io_tree_init(struct extent_io_tree *tree,
172 struct address_space *mapping, gfp_t mask); 163 struct address_space *mapping);
173int try_release_extent_mapping(struct extent_map_tree *map, 164int try_release_extent_mapping(struct extent_map_tree *map,
174 struct extent_io_tree *tree, struct page *page, 165 struct extent_io_tree *tree, struct page *page,
175 gfp_t mask); 166 gfp_t mask);
@@ -215,14 +206,8 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
215 gfp_t mask); 206 gfp_t mask);
216int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 207int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
217 gfp_t mask); 208 gfp_t mask);
218int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
219 gfp_t mask);
220int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
221 u64 end, gfp_t mask);
222int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, 209int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
223 struct extent_state **cached_state, gfp_t mask); 210 struct extent_state **cached_state, gfp_t mask);
224int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
225 gfp_t mask);
226int find_first_extent_bit(struct extent_io_tree *tree, u64 start, 211int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
227 u64 *start_ret, u64 *end_ret, int bits); 212 u64 *start_ret, u64 *end_ret, int bits);
228struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, 213struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
@@ -243,28 +228,17 @@ int extent_readpages(struct extent_io_tree *tree,
243 struct address_space *mapping, 228 struct address_space *mapping,
244 struct list_head *pages, unsigned nr_pages, 229 struct list_head *pages, unsigned nr_pages,
245 get_extent_t get_extent); 230 get_extent_t get_extent);
246int extent_prepare_write(struct extent_io_tree *tree,
247 struct inode *inode, struct page *page,
248 unsigned from, unsigned to, get_extent_t *get_extent);
249int extent_commit_write(struct extent_io_tree *tree,
250 struct inode *inode, struct page *page,
251 unsigned from, unsigned to);
252sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
253 get_extent_t *get_extent);
254int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 231int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
255 __u64 start, __u64 len, get_extent_t *get_extent); 232 __u64 start, __u64 len, get_extent_t *get_extent);
256int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
257int set_state_private(struct extent_io_tree *tree, u64 start, u64 private); 233int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
258int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private); 234int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
259void set_page_extent_mapped(struct page *page); 235void set_page_extent_mapped(struct page *page);
260 236
261struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, 237struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
262 u64 start, unsigned long len, 238 u64 start, unsigned long len,
263 struct page *page0, 239 struct page *page0);
264 gfp_t mask);
265struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, 240struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
266 u64 start, unsigned long len, 241 u64 start, unsigned long len);
267 gfp_t mask);
268void free_extent_buffer(struct extent_buffer *eb); 242void free_extent_buffer(struct extent_buffer *eb);
269int read_extent_buffer_pages(struct extent_io_tree *tree, 243int read_extent_buffer_pages(struct extent_io_tree *tree,
270 struct extent_buffer *eb, u64 start, int wait, 244 struct extent_buffer *eb, u64 start, int wait,
@@ -292,16 +266,11 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
292 unsigned long src_offset, unsigned long len); 266 unsigned long src_offset, unsigned long len);
293void memset_extent_buffer(struct extent_buffer *eb, char c, 267void memset_extent_buffer(struct extent_buffer *eb, char c,
294 unsigned long start, unsigned long len); 268 unsigned long start, unsigned long len);
295int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
296 struct extent_buffer *eb);
297int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end);
298int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); 269int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
299int clear_extent_buffer_dirty(struct extent_io_tree *tree, 270int clear_extent_buffer_dirty(struct extent_io_tree *tree,
300 struct extent_buffer *eb); 271 struct extent_buffer *eb);
301int set_extent_buffer_dirty(struct extent_io_tree *tree, 272int set_extent_buffer_dirty(struct extent_io_tree *tree,
302 struct extent_buffer *eb); 273 struct extent_buffer *eb);
303int test_extent_buffer_dirty(struct extent_io_tree *tree,
304 struct extent_buffer *eb);
305int set_extent_buffer_uptodate(struct extent_io_tree *tree, 274int set_extent_buffer_uptodate(struct extent_io_tree *tree,
306 struct extent_buffer *eb); 275 struct extent_buffer *eb);
307int clear_extent_buffer_uptodate(struct extent_io_tree *tree, 276int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
@@ -319,7 +288,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
319 unsigned long *map_start, 288 unsigned long *map_start,
320 unsigned long *map_len, int km); 289 unsigned long *map_len, int km);
321void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km); 290void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
322int release_extent_buffer_tail_pages(struct extent_buffer *eb);
323int extent_range_uptodate(struct extent_io_tree *tree, 291int extent_range_uptodate(struct extent_io_tree *tree,
324 u64 start, u64 end); 292 u64 start, u64 end);
325int extent_clear_unlock_delalloc(struct inode *inode, 293int extent_clear_unlock_delalloc(struct inode *inode,
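A hedged caller sketch matching the header changes above: extent-buffer lookup and allocation no longer take a gfp mask, allocation inside is GFP_NOFS. The helper name and the assumption that find_extent_buffer() returns NULL on a cache miss are the author of this note's, not the commit's:

/* Hedged sketch, not part of the commit. */
static struct extent_buffer *get_or_alloc_eb(struct extent_io_tree *tree,
					     u64 start, unsigned long len)
{
	struct extent_buffer *eb;

	eb = find_extent_buffer(tree, start, len);	/* was: (..., GFP_NOFS) */
	if (!eb)					/* assumed NULL on miss */
		eb = alloc_extent_buffer(tree, start, len, NULL); /* page0 == NULL */
	return eb;
}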
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index a24a3f2fa13e..2d0410344ea3 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -28,12 +28,11 @@ void extent_map_exit(void)
28/** 28/**
29 * extent_map_tree_init - initialize extent map tree 29 * extent_map_tree_init - initialize extent map tree
30 * @tree: tree to initialize 30 * @tree: tree to initialize
31 * @mask: flags for memory allocations during tree operations
32 * 31 *
33 * Initialize the extent tree @tree. Should be called for each new inode 32 * Initialize the extent tree @tree. Should be called for each new inode
34 * or other user of the extent_map interface. 33 * or other user of the extent_map interface.
35 */ 34 */
36void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask) 35void extent_map_tree_init(struct extent_map_tree *tree)
37{ 36{
38 tree->map = RB_ROOT; 37 tree->map = RB_ROOT;
39 rwlock_init(&tree->lock); 38 rwlock_init(&tree->lock);
@@ -41,16 +40,15 @@ void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
41 40
42/** 41/**
43 * alloc_extent_map - allocate new extent map structure 42 * alloc_extent_map - allocate new extent map structure
44 * @mask: memory allocation flags
45 * 43 *
46 * Allocate a new extent_map structure. The new structure is 44 * Allocate a new extent_map structure. The new structure is
47 * returned with a reference count of one and needs to be 45 * returned with a reference count of one and needs to be
48 * freed using free_extent_map() 46 * freed using free_extent_map()
49 */ 47 */
50struct extent_map *alloc_extent_map(gfp_t mask) 48struct extent_map *alloc_extent_map(void)
51{ 49{
52 struct extent_map *em; 50 struct extent_map *em;
53 em = kmem_cache_alloc(extent_map_cache, mask); 51 em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
54 if (!em) 52 if (!em)
55 return NULL; 53 return NULL;
56 em->in_tree = 0; 54 em->in_tree = 0;
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 28b44dbd1e35..33a7890b1f40 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -49,14 +49,14 @@ static inline u64 extent_map_block_end(struct extent_map *em)
49 return em->block_start + em->block_len; 49 return em->block_start + em->block_len;
50} 50}
51 51
52void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask); 52void extent_map_tree_init(struct extent_map_tree *tree);
53struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, 53struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
54 u64 start, u64 len); 54 u64 start, u64 len);
55int add_extent_mapping(struct extent_map_tree *tree, 55int add_extent_mapping(struct extent_map_tree *tree,
56 struct extent_map *em); 56 struct extent_map *em);
57int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); 57int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
58 58
59struct extent_map *alloc_extent_map(gfp_t mask); 59struct extent_map *alloc_extent_map(void);
60void free_extent_map(struct extent_map *em); 60void free_extent_map(struct extent_map *em);
61int __init extent_map_init(void); 61int __init extent_map_init(void);
62void extent_map_exit(void); 62void extent_map_exit(void);
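A hedged before/after caller sketch for the extent_map API change above: the gfp mask argument is gone, and alloc_extent_map() now uses GFP_NOFS internally, so callers only drop the argument. The wrapper function is illustrative:

/* Hedged sketch, not part of the commit. */
static int example_init_and_alloc(struct extent_map_tree *tree)
{
	struct extent_map *em;

	extent_map_tree_init(tree);	/* was: extent_map_tree_init(tree, GFP_NOFS) */

	em = alloc_extent_map();	/* was: alloc_extent_map(GFP_NOFS) */
	if (!em)
		return -ENOMEM;		/* NULL check is still the caller's job */

	free_extent_map(em);
	return 0;
}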
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index a6a9d4e8b491..90d4ee52cd45 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -193,7 +193,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
193 u32 item_size; 193 u32 item_size;
194 194
195 if (item) 195 if (item)
196 btrfs_release_path(root, path); 196 btrfs_release_path(path);
197 item = btrfs_lookup_csum(NULL, root->fs_info->csum_root, 197 item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
198 path, disk_bytenr, 0); 198 path, disk_bytenr, 0);
199 if (IS_ERR(item)) { 199 if (IS_ERR(item)) {
@@ -208,12 +208,13 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
208 EXTENT_NODATASUM, GFP_NOFS); 208 EXTENT_NODATASUM, GFP_NOFS);
209 } else { 209 } else {
210 printk(KERN_INFO "btrfs no csum found " 210 printk(KERN_INFO "btrfs no csum found "
211 "for inode %lu start %llu\n", 211 "for inode %llu start %llu\n",
212 inode->i_ino, 212 (unsigned long long)
213 btrfs_ino(inode),
213 (unsigned long long)offset); 214 (unsigned long long)offset);
214 } 215 }
215 item = NULL; 216 item = NULL;
216 btrfs_release_path(root, path); 217 btrfs_release_path(path);
217 goto found; 218 goto found;
218 } 219 }
219 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 220 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
@@ -266,7 +267,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
266} 267}
267 268
268int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, 269int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
269 struct list_head *list) 270 struct list_head *list, int search_commit)
270{ 271{
271 struct btrfs_key key; 272 struct btrfs_key key;
272 struct btrfs_path *path; 273 struct btrfs_path *path;
@@ -283,6 +284,12 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
283 path = btrfs_alloc_path(); 284 path = btrfs_alloc_path();
284 BUG_ON(!path); 285 BUG_ON(!path);
285 286
287 if (search_commit) {
288 path->skip_locking = 1;
289 path->reada = 2;
290 path->search_commit_root = 1;
291 }
292
286 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 293 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
287 key.offset = start; 294 key.offset = start;
288 key.type = BTRFS_EXTENT_CSUM_KEY; 295 key.type = BTRFS_EXTENT_CSUM_KEY;
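A hedged caller sketch for the new search_commit flag above: a reader that only needs checksums as they stood at the last commit passes 1, and the skip_locking / reada / search_commit_root setup shown above happens inside the helper. Variable and function names in the wrapper are illustrative:

/* Hedged sketch, not part of the commit. */
static int example_lookup_committed_csums(struct btrfs_root *csum_root,
					  u64 start, u64 end,
					  struct list_head *csum_list)
{
	/* search_commit = 1: unlocked read-only walk of the commit root */
	return btrfs_lookup_csums_range(csum_root, start, end,
					csum_list, 1);
}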
@@ -495,7 +502,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
495 u32 new_size = (bytenr - key->offset) >> blocksize_bits; 502 u32 new_size = (bytenr - key->offset) >> blocksize_bits;
496 new_size *= csum_size; 503 new_size *= csum_size;
497 ret = btrfs_truncate_item(trans, root, path, new_size, 1); 504 ret = btrfs_truncate_item(trans, root, path, new_size, 1);
498 BUG_ON(ret);
499 } else if (key->offset >= bytenr && csum_end > end_byte && 505 } else if (key->offset >= bytenr && csum_end > end_byte &&
500 end_byte > key->offset) { 506 end_byte > key->offset) {
501 /* 507 /*
@@ -508,7 +514,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
508 new_size *= csum_size; 514 new_size *= csum_size;
509 515
510 ret = btrfs_truncate_item(trans, root, path, new_size, 0); 516 ret = btrfs_truncate_item(trans, root, path, new_size, 0);
511 BUG_ON(ret);
512 517
513 key->offset = end_byte; 518 key->offset = end_byte;
514 ret = btrfs_set_item_key_safe(trans, root, path, key); 519 ret = btrfs_set_item_key_safe(trans, root, path, key);
@@ -551,10 +556,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
551 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 556 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
552 if (ret > 0) { 557 if (ret > 0) {
553 if (path->slots[0] == 0) 558 if (path->slots[0] == 0)
554 goto out; 559 break;
555 path->slots[0]--; 560 path->slots[0]--;
556 } else if (ret < 0) { 561 } else if (ret < 0) {
557 goto out; 562 break;
558 } 563 }
559 564
560 leaf = path->nodes[0]; 565 leaf = path->nodes[0];
@@ -579,7 +584,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
579 /* delete the entire item, it is inside our range */ 584 /* delete the entire item, it is inside our range */
580 if (key.offset >= bytenr && csum_end <= end_byte) { 585 if (key.offset >= bytenr && csum_end <= end_byte) {
581 ret = btrfs_del_item(trans, root, path); 586 ret = btrfs_del_item(trans, root, path);
582 BUG_ON(ret); 587 if (ret)
588 goto out;
583 if (key.offset == bytenr) 589 if (key.offset == bytenr)
584 break; 590 break;
585 } else if (key.offset < bytenr && csum_end > end_byte) { 591 } else if (key.offset < bytenr && csum_end > end_byte) {
@@ -631,11 +637,12 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
631 if (key.offset < bytenr) 637 if (key.offset < bytenr)
632 break; 638 break;
633 } 639 }
634 btrfs_release_path(root, path); 640 btrfs_release_path(path);
635 } 641 }
642 ret = 0;
636out: 643out:
637 btrfs_free_path(path); 644 btrfs_free_path(path);
638 return 0; 645 return ret;
639} 646}
640 647
641int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, 648int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
@@ -722,7 +729,7 @@ again:
722 * at this point, we know the tree has an item, but it isn't big 729 * at this point, we know the tree has an item, but it isn't big
723 * enough yet to put our csum in. Grow it 730 * enough yet to put our csum in. Grow it
724 */ 731 */
725 btrfs_release_path(root, path); 732 btrfs_release_path(path);
726 ret = btrfs_search_slot(trans, root, &file_key, path, 733 ret = btrfs_search_slot(trans, root, &file_key, path,
727 csum_size, 1); 734 csum_size, 1);
728 if (ret < 0) 735 if (ret < 0)
@@ -761,12 +768,11 @@ again:
761 goto insert; 768 goto insert;
762 769
763 ret = btrfs_extend_item(trans, root, path, diff); 770 ret = btrfs_extend_item(trans, root, path, diff);
764 BUG_ON(ret);
765 goto csum; 771 goto csum;
766 } 772 }
767 773
768insert: 774insert:
769 btrfs_release_path(root, path); 775 btrfs_release_path(path);
770 csum_offset = 0; 776 csum_offset = 0;
771 if (found_next) { 777 if (found_next) {
772 u64 tmp = total_bytes + root->sectorsize; 778 u64 tmp = total_bytes + root->sectorsize;
@@ -850,7 +856,7 @@ next_sector:
850 } 856 }
851 btrfs_mark_buffer_dirty(path->nodes[0]); 857 btrfs_mark_buffer_dirty(path->nodes[0]);
852 if (total_bytes < sums->len) { 858 if (total_bytes < sums->len) {
853 btrfs_release_path(root, path); 859 btrfs_release_path(path);
854 cond_resched(); 860 cond_resched();
855 goto again; 861 goto again;
856 } 862 }
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 75899a01dded..c6a22d783c35 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -40,6 +40,263 @@
40#include "locking.h" 40#include "locking.h"
41#include "compat.h" 41#include "compat.h"
42 42
43/*
 44 * when auto defrag is enabled, we
45 * queue up these defrag structs to remember which
46 * inodes need defragging passes
47 */
48struct inode_defrag {
49 struct rb_node rb_node;
50 /* objectid */
51 u64 ino;
52 /*
53 * transid where the defrag was added, we search for
54 * extents newer than this
55 */
56 u64 transid;
57
58 /* root objectid */
59 u64 root;
60
61 /* last offset we were able to defrag */
62 u64 last_offset;
63
64 /* if we've wrapped around back to zero once already */
65 int cycled;
66};
67
 68/* insert a record for an inode into the defrag tree. The lock
69 * must be held already
70 *
71 * If you're inserting a record for an older transid than an
72 * existing record, the transid already in the tree is lowered
73 *
 74 * If an existing record is found, the defrag item you
75 * pass in is freed
76 */
77static int __btrfs_add_inode_defrag(struct inode *inode,
78 struct inode_defrag *defrag)
79{
80 struct btrfs_root *root = BTRFS_I(inode)->root;
81 struct inode_defrag *entry;
82 struct rb_node **p;
83 struct rb_node *parent = NULL;
84
85 p = &root->fs_info->defrag_inodes.rb_node;
86 while (*p) {
87 parent = *p;
88 entry = rb_entry(parent, struct inode_defrag, rb_node);
89
90 if (defrag->ino < entry->ino)
91 p = &parent->rb_left;
92 else if (defrag->ino > entry->ino)
93 p = &parent->rb_right;
94 else {
95 /* if we're reinserting an entry for
96 * an old defrag run, make sure to
97 * lower the transid of our existing record
98 */
99 if (defrag->transid < entry->transid)
100 entry->transid = defrag->transid;
101 if (defrag->last_offset > entry->last_offset)
102 entry->last_offset = defrag->last_offset;
103 goto exists;
104 }
105 }
106 BTRFS_I(inode)->in_defrag = 1;
107 rb_link_node(&defrag->rb_node, parent, p);
108 rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
109 return 0;
110
111exists:
112 kfree(defrag);
113 return 0;
114
115}
116
117/*
118 * insert a defrag record for this inode if auto defrag is
119 * enabled
120 */
121int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
122 struct inode *inode)
123{
124 struct btrfs_root *root = BTRFS_I(inode)->root;
125 struct inode_defrag *defrag;
126 int ret = 0;
127 u64 transid;
128
129 if (!btrfs_test_opt(root, AUTO_DEFRAG))
130 return 0;
131
132 if (root->fs_info->closing)
133 return 0;
134
135 if (BTRFS_I(inode)->in_defrag)
136 return 0;
137
138 if (trans)
139 transid = trans->transid;
140 else
141 transid = BTRFS_I(inode)->root->last_trans;
142
143 defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
144 if (!defrag)
145 return -ENOMEM;
146
147 defrag->ino = inode->i_ino;
148 defrag->transid = transid;
149 defrag->root = root->root_key.objectid;
150
151 spin_lock(&root->fs_info->defrag_inodes_lock);
152 if (!BTRFS_I(inode)->in_defrag)
153 ret = __btrfs_add_inode_defrag(inode, defrag);
154 spin_unlock(&root->fs_info->defrag_inodes_lock);
155 return ret;
156}
157
158/*
159 * must be called with the defrag_inodes lock held
160 */
161struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino,
162 struct rb_node **next)
163{
164 struct inode_defrag *entry = NULL;
165 struct rb_node *p;
166 struct rb_node *parent = NULL;
167
168 p = info->defrag_inodes.rb_node;
169 while (p) {
170 parent = p;
171 entry = rb_entry(parent, struct inode_defrag, rb_node);
172
173 if (ino < entry->ino)
174 p = parent->rb_left;
175 else if (ino > entry->ino)
176 p = parent->rb_right;
177 else
178 return entry;
179 }
180
181 if (next) {
182 while (parent && ino > entry->ino) {
183 parent = rb_next(parent);
184 entry = rb_entry(parent, struct inode_defrag, rb_node);
185 }
186 *next = parent;
187 }
188 return NULL;
189}
190
191/*
192 * run through the list of inodes in the FS that need
193 * defragging
194 */
195int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
196{
197 struct inode_defrag *defrag;
198 struct btrfs_root *inode_root;
199 struct inode *inode;
200 struct rb_node *n;
201 struct btrfs_key key;
202 struct btrfs_ioctl_defrag_range_args range;
203 u64 first_ino = 0;
204 int num_defrag;
205 int defrag_batch = 1024;
206
207 memset(&range, 0, sizeof(range));
208 range.len = (u64)-1;
209
210 atomic_inc(&fs_info->defrag_running);
211 spin_lock(&fs_info->defrag_inodes_lock);
 212	while (1) {
213 n = NULL;
214
215 /* find an inode to defrag */
216 defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
217 if (!defrag) {
218 if (n)
219 defrag = rb_entry(n, struct inode_defrag, rb_node);
220 else if (first_ino) {
221 first_ino = 0;
222 continue;
223 } else {
224 break;
225 }
226 }
227
228 /* remove it from the rbtree */
229 first_ino = defrag->ino + 1;
230 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
231
232 if (fs_info->closing)
233 goto next_free;
234
235 spin_unlock(&fs_info->defrag_inodes_lock);
236
237 /* get the inode */
238 key.objectid = defrag->root;
239 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
240 key.offset = (u64)-1;
241 inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
242 if (IS_ERR(inode_root))
243 goto next;
244
245 key.objectid = defrag->ino;
246 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
247 key.offset = 0;
248
249 inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
250 if (IS_ERR(inode))
251 goto next;
252
253 /* do a chunk of defrag */
254 BTRFS_I(inode)->in_defrag = 0;
255 range.start = defrag->last_offset;
256 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
257 defrag_batch);
258 /*
259 * if we filled the whole defrag batch, there
260 * must be more work to do. Queue this defrag
261 * again
262 */
263 if (num_defrag == defrag_batch) {
264 defrag->last_offset = range.start;
265 __btrfs_add_inode_defrag(inode, defrag);
266 /*
267 * we don't want to kfree defrag, we added it back to
268 * the rbtree
269 */
270 defrag = NULL;
271 } else if (defrag->last_offset && !defrag->cycled) {
272 /*
273 * we didn't fill our defrag batch, but
274 * we didn't start at zero. Make sure we loop
275 * around to the start of the file.
276 */
277 defrag->last_offset = 0;
278 defrag->cycled = 1;
279 __btrfs_add_inode_defrag(inode, defrag);
280 defrag = NULL;
281 }
282
283 iput(inode);
284next:
285 spin_lock(&fs_info->defrag_inodes_lock);
286next_free:
287 kfree(defrag);
288 }
289 spin_unlock(&fs_info->defrag_inodes_lock);
290
291 atomic_dec(&fs_info->defrag_running);
292
293 /*
294 * during unmount, we use the transaction_wait queue to
295 * wait for the defragger to stop
296 */
297 wake_up(&fs_info->transaction_wait);
298 return 0;
299}
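A hedged sketch of the unmount side implied by the comment above; the matching close_ctree() change is not in this hunk, so the wait shown here is an assumption built only from the defrag_running counter and the transaction_wait wakeup visible above:

/* Hedged sketch, not part of the commit. */
static void example_wait_for_defragger(struct btrfs_fs_info *fs_info)
{
	wait_event(fs_info->transaction_wait,
		   atomic_read(&fs_info->defrag_running) == 0);
}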
43 300
44/* simple helper to fault in pages and copy. This should go away 301/* simple helper to fault in pages and copy. This should go away
45 * and be replaced with calls into generic code. 302 * and be replaced with calls into generic code.
@@ -191,9 +448,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
191 } 448 }
192 while (1) { 449 while (1) {
193 if (!split) 450 if (!split)
194 split = alloc_extent_map(GFP_NOFS); 451 split = alloc_extent_map();
195 if (!split2) 452 if (!split2)
196 split2 = alloc_extent_map(GFP_NOFS); 453 split2 = alloc_extent_map();
197 BUG_ON(!split || !split2); 454 BUG_ON(!split || !split2);
198 455
199 write_lock(&em_tree->lock); 456 write_lock(&em_tree->lock);
@@ -298,6 +555,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
298 struct btrfs_path *path; 555 struct btrfs_path *path;
299 struct btrfs_key key; 556 struct btrfs_key key;
300 struct btrfs_key new_key; 557 struct btrfs_key new_key;
558 u64 ino = btrfs_ino(inode);
301 u64 search_start = start; 559 u64 search_start = start;
302 u64 disk_bytenr = 0; 560 u64 disk_bytenr = 0;
303 u64 num_bytes = 0; 561 u64 num_bytes = 0;
@@ -318,14 +576,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
318 576
319 while (1) { 577 while (1) {
320 recow = 0; 578 recow = 0;
321 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, 579 ret = btrfs_lookup_file_extent(trans, root, path, ino,
322 search_start, -1); 580 search_start, -1);
323 if (ret < 0) 581 if (ret < 0)
324 break; 582 break;
325 if (ret > 0 && path->slots[0] > 0 && search_start == start) { 583 if (ret > 0 && path->slots[0] > 0 && search_start == start) {
326 leaf = path->nodes[0]; 584 leaf = path->nodes[0];
327 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); 585 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
328 if (key.objectid == inode->i_ino && 586 if (key.objectid == ino &&
329 key.type == BTRFS_EXTENT_DATA_KEY) 587 key.type == BTRFS_EXTENT_DATA_KEY)
330 path->slots[0]--; 588 path->slots[0]--;
331 } 589 }
@@ -346,7 +604,7 @@ next_slot:
346 } 604 }
347 605
348 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 606 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
349 if (key.objectid > inode->i_ino || 607 if (key.objectid > ino ||
350 key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) 608 key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
351 break; 609 break;
352 610
@@ -376,7 +634,7 @@ next_slot:
376 634
377 search_start = max(key.offset, start); 635 search_start = max(key.offset, start);
378 if (recow) { 636 if (recow) {
379 btrfs_release_path(root, path); 637 btrfs_release_path(path);
380 continue; 638 continue;
381 } 639 }
382 640
@@ -393,7 +651,7 @@ next_slot:
393 ret = btrfs_duplicate_item(trans, root, path, 651 ret = btrfs_duplicate_item(trans, root, path,
394 &new_key); 652 &new_key);
395 if (ret == -EAGAIN) { 653 if (ret == -EAGAIN) {
396 btrfs_release_path(root, path); 654 btrfs_release_path(path);
397 continue; 655 continue;
398 } 656 }
399 if (ret < 0) 657 if (ret < 0)
@@ -516,7 +774,7 @@ next_slot:
516 del_nr = 0; 774 del_nr = 0;
517 del_slot = 0; 775 del_slot = 0;
518 776
519 btrfs_release_path(root, path); 777 btrfs_release_path(path);
520 continue; 778 continue;
521 } 779 }
522 780
@@ -592,6 +850,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
592 int del_slot = 0; 850 int del_slot = 0;
593 int recow; 851 int recow;
594 int ret; 852 int ret;
853 u64 ino = btrfs_ino(inode);
595 854
596 btrfs_drop_extent_cache(inode, start, end - 1, 0); 855 btrfs_drop_extent_cache(inode, start, end - 1, 0);
597 856
@@ -600,7 +859,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
600again: 859again:
601 recow = 0; 860 recow = 0;
602 split = start; 861 split = start;
603 key.objectid = inode->i_ino; 862 key.objectid = ino;
604 key.type = BTRFS_EXTENT_DATA_KEY; 863 key.type = BTRFS_EXTENT_DATA_KEY;
605 key.offset = split; 864 key.offset = split;
606 865
@@ -612,8 +871,7 @@ again:
612 871
613 leaf = path->nodes[0]; 872 leaf = path->nodes[0];
614 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 873 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
615 BUG_ON(key.objectid != inode->i_ino || 874 BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
616 key.type != BTRFS_EXTENT_DATA_KEY);
617 fi = btrfs_item_ptr(leaf, path->slots[0], 875 fi = btrfs_item_ptr(leaf, path->slots[0],
618 struct btrfs_file_extent_item); 876 struct btrfs_file_extent_item);
619 BUG_ON(btrfs_file_extent_type(leaf, fi) != 877 BUG_ON(btrfs_file_extent_type(leaf, fi) !=
@@ -630,7 +888,7 @@ again:
630 other_start = 0; 888 other_start = 0;
631 other_end = start; 889 other_end = start;
632 if (extent_mergeable(leaf, path->slots[0] - 1, 890 if (extent_mergeable(leaf, path->slots[0] - 1,
633 inode->i_ino, bytenr, orig_offset, 891 ino, bytenr, orig_offset,
634 &other_start, &other_end)) { 892 &other_start, &other_end)) {
635 new_key.offset = end; 893 new_key.offset = end;
636 btrfs_set_item_key_safe(trans, root, path, &new_key); 894 btrfs_set_item_key_safe(trans, root, path, &new_key);
@@ -653,7 +911,7 @@ again:
653 other_start = end; 911 other_start = end;
654 other_end = 0; 912 other_end = 0;
655 if (extent_mergeable(leaf, path->slots[0] + 1, 913 if (extent_mergeable(leaf, path->slots[0] + 1,
656 inode->i_ino, bytenr, orig_offset, 914 ino, bytenr, orig_offset,
657 &other_start, &other_end)) { 915 &other_start, &other_end)) {
658 fi = btrfs_item_ptr(leaf, path->slots[0], 916 fi = btrfs_item_ptr(leaf, path->slots[0],
659 struct btrfs_file_extent_item); 917 struct btrfs_file_extent_item);
@@ -681,7 +939,7 @@ again:
681 new_key.offset = split; 939 new_key.offset = split;
682 ret = btrfs_duplicate_item(trans, root, path, &new_key); 940 ret = btrfs_duplicate_item(trans, root, path, &new_key);
683 if (ret == -EAGAIN) { 941 if (ret == -EAGAIN) {
684 btrfs_release_path(root, path); 942 btrfs_release_path(path);
685 goto again; 943 goto again;
686 } 944 }
687 BUG_ON(ret < 0); 945 BUG_ON(ret < 0);
@@ -702,7 +960,7 @@ again:
702 960
703 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, 961 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
704 root->root_key.objectid, 962 root->root_key.objectid,
705 inode->i_ino, orig_offset); 963 ino, orig_offset);
706 BUG_ON(ret); 964 BUG_ON(ret);
707 965
708 if (split == start) { 966 if (split == start) {
@@ -718,10 +976,10 @@ again:
718 other_start = end; 976 other_start = end;
719 other_end = 0; 977 other_end = 0;
720 if (extent_mergeable(leaf, path->slots[0] + 1, 978 if (extent_mergeable(leaf, path->slots[0] + 1,
721 inode->i_ino, bytenr, orig_offset, 979 ino, bytenr, orig_offset,
722 &other_start, &other_end)) { 980 &other_start, &other_end)) {
723 if (recow) { 981 if (recow) {
724 btrfs_release_path(root, path); 982 btrfs_release_path(path);
725 goto again; 983 goto again;
726 } 984 }
727 extent_end = other_end; 985 extent_end = other_end;
@@ -729,16 +987,16 @@ again:
729 del_nr++; 987 del_nr++;
730 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 988 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
731 0, root->root_key.objectid, 989 0, root->root_key.objectid,
732 inode->i_ino, orig_offset); 990 ino, orig_offset);
733 BUG_ON(ret); 991 BUG_ON(ret);
734 } 992 }
735 other_start = 0; 993 other_start = 0;
736 other_end = start; 994 other_end = start;
737 if (extent_mergeable(leaf, path->slots[0] - 1, 995 if (extent_mergeable(leaf, path->slots[0] - 1,
738 inode->i_ino, bytenr, orig_offset, 996 ino, bytenr, orig_offset,
739 &other_start, &other_end)) { 997 &other_start, &other_end)) {
740 if (recow) { 998 if (recow) {
741 btrfs_release_path(root, path); 999 btrfs_release_path(path);
742 goto again; 1000 goto again;
743 } 1001 }
744 key.offset = other_start; 1002 key.offset = other_start;
@@ -746,7 +1004,7 @@ again:
746 del_nr++; 1004 del_nr++;
747 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 1005 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
748 0, root->root_key.objectid, 1006 0, root->root_key.objectid,
749 inode->i_ino, orig_offset); 1007 ino, orig_offset);
750 BUG_ON(ret); 1008 BUG_ON(ret);
751 } 1009 }
752 if (del_nr == 0) { 1010 if (del_nr == 0) {
@@ -1375,7 +1633,7 @@ static long btrfs_fallocate(struct file *file, int mode,
1375 while (1) { 1633 while (1) {
1376 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 1634 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
1377 alloc_end - cur_offset, 0); 1635 alloc_end - cur_offset, 0);
1378 BUG_ON(IS_ERR(em) || !em); 1636 BUG_ON(IS_ERR_OR_NULL(em));
1379 last_byte = min(extent_map_end(em), alloc_end); 1637 last_byte = min(extent_map_end(em), alloc_end);
1380 last_byte = (last_byte + mask) & ~mask; 1638 last_byte = (last_byte + mask) & ~mask;
1381 if (em->block_start == EXTENT_MAP_HOLE || 1639 if (em->block_start == EXTENT_MAP_HOLE ||
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 63731a1fb0a1..70d45795d758 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -25,18 +25,17 @@
25#include "transaction.h" 25#include "transaction.h"
26#include "disk-io.h" 26#include "disk-io.h"
27#include "extent_io.h" 27#include "extent_io.h"
28#include "inode-map.h"
28 29
29#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) 30#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
30#define MAX_CACHE_BYTES_PER_GIG (32 * 1024) 31#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
31 32
32static void recalculate_thresholds(struct btrfs_block_group_cache 33static int link_free_space(struct btrfs_free_space_ctl *ctl,
33 *block_group);
34static int link_free_space(struct btrfs_block_group_cache *block_group,
35 struct btrfs_free_space *info); 34 struct btrfs_free_space *info);
36 35
37struct inode *lookup_free_space_inode(struct btrfs_root *root, 36static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
38 struct btrfs_block_group_cache 37 struct btrfs_path *path,
39 *block_group, struct btrfs_path *path) 38 u64 offset)
40{ 39{
41 struct btrfs_key key; 40 struct btrfs_key key;
42 struct btrfs_key location; 41 struct btrfs_key location;
@@ -46,22 +45,15 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
46 struct inode *inode = NULL; 45 struct inode *inode = NULL;
47 int ret; 46 int ret;
48 47
49 spin_lock(&block_group->lock);
50 if (block_group->inode)
51 inode = igrab(block_group->inode);
52 spin_unlock(&block_group->lock);
53 if (inode)
54 return inode;
55
56 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 48 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
57 key.offset = block_group->key.objectid; 49 key.offset = offset;
58 key.type = 0; 50 key.type = 0;
59 51
60 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 52 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
61 if (ret < 0) 53 if (ret < 0)
62 return ERR_PTR(ret); 54 return ERR_PTR(ret);
63 if (ret > 0) { 55 if (ret > 0) {
64 btrfs_release_path(root, path); 56 btrfs_release_path(path);
65 return ERR_PTR(-ENOENT); 57 return ERR_PTR(-ENOENT);
66 } 58 }
67 59
@@ -70,7 +62,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
70 struct btrfs_free_space_header); 62 struct btrfs_free_space_header);
71 btrfs_free_space_key(leaf, header, &disk_key); 63 btrfs_free_space_key(leaf, header, &disk_key);
72 btrfs_disk_key_to_cpu(&location, &disk_key); 64 btrfs_disk_key_to_cpu(&location, &disk_key);
73 btrfs_release_path(root, path); 65 btrfs_release_path(path);
74 66
75 inode = btrfs_iget(root->fs_info->sb, &location, root, NULL); 67 inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
76 if (!inode) 68 if (!inode)
@@ -84,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
84 76
85 inode->i_mapping->flags &= ~__GFP_FS; 77 inode->i_mapping->flags &= ~__GFP_FS;
86 78
79 return inode;
80}
81
82struct inode *lookup_free_space_inode(struct btrfs_root *root,
83 struct btrfs_block_group_cache
84 *block_group, struct btrfs_path *path)
85{
86 struct inode *inode = NULL;
87
88 spin_lock(&block_group->lock);
89 if (block_group->inode)
90 inode = igrab(block_group->inode);
91 spin_unlock(&block_group->lock);
92 if (inode)
93 return inode;
94
95 inode = __lookup_free_space_inode(root, path,
96 block_group->key.objectid);
97 if (IS_ERR(inode))
98 return inode;
99
87 spin_lock(&block_group->lock); 100 spin_lock(&block_group->lock);
88 if (!root->fs_info->closing) { 101 if (!root->fs_info->closing) {
89 block_group->inode = igrab(inode); 102 block_group->inode = igrab(inode);
@@ -94,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
94 return inode; 107 return inode;
95} 108}
96 109
97int create_free_space_inode(struct btrfs_root *root, 110int __create_free_space_inode(struct btrfs_root *root,
98 struct btrfs_trans_handle *trans, 111 struct btrfs_trans_handle *trans,
99 struct btrfs_block_group_cache *block_group, 112 struct btrfs_path *path, u64 ino, u64 offset)
100 struct btrfs_path *path)
101{ 113{
102 struct btrfs_key key; 114 struct btrfs_key key;
103 struct btrfs_disk_key disk_key; 115 struct btrfs_disk_key disk_key;
104 struct btrfs_free_space_header *header; 116 struct btrfs_free_space_header *header;
105 struct btrfs_inode_item *inode_item; 117 struct btrfs_inode_item *inode_item;
106 struct extent_buffer *leaf; 118 struct extent_buffer *leaf;
107 u64 objectid;
108 int ret; 119 int ret;
109 120
110 ret = btrfs_find_free_objectid(trans, root, 0, &objectid); 121 ret = btrfs_insert_empty_inode(trans, root, path, ino);
111 if (ret < 0)
112 return ret;
113
114 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
115 if (ret) 122 if (ret)
116 return ret; 123 return ret;
117 124
@@ -131,19 +138,18 @@ int create_free_space_inode(struct btrfs_root *root,
131 BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM); 138 BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
132 btrfs_set_inode_nlink(leaf, inode_item, 1); 139 btrfs_set_inode_nlink(leaf, inode_item, 1);
133 btrfs_set_inode_transid(leaf, inode_item, trans->transid); 140 btrfs_set_inode_transid(leaf, inode_item, trans->transid);
134 btrfs_set_inode_block_group(leaf, inode_item, 141 btrfs_set_inode_block_group(leaf, inode_item, offset);
135 block_group->key.objectid);
136 btrfs_mark_buffer_dirty(leaf); 142 btrfs_mark_buffer_dirty(leaf);
137 btrfs_release_path(root, path); 143 btrfs_release_path(path);
138 144
139 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 145 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
140 key.offset = block_group->key.objectid; 146 key.offset = offset;
141 key.type = 0; 147 key.type = 0;
142 148
143 ret = btrfs_insert_empty_item(trans, root, path, &key, 149 ret = btrfs_insert_empty_item(trans, root, path, &key,
144 sizeof(struct btrfs_free_space_header)); 150 sizeof(struct btrfs_free_space_header));
145 if (ret < 0) { 151 if (ret < 0) {
146 btrfs_release_path(root, path); 152 btrfs_release_path(path);
147 return ret; 153 return ret;
148 } 154 }
149 leaf = path->nodes[0]; 155 leaf = path->nodes[0];
@@ -152,11 +158,27 @@ int create_free_space_inode(struct btrfs_root *root,
152 memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header)); 158 memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
153 btrfs_set_free_space_key(leaf, header, &disk_key); 159 btrfs_set_free_space_key(leaf, header, &disk_key);
154 btrfs_mark_buffer_dirty(leaf); 160 btrfs_mark_buffer_dirty(leaf);
155 btrfs_release_path(root, path); 161 btrfs_release_path(path);
156 162
157 return 0; 163 return 0;
158} 164}
159 165
166int create_free_space_inode(struct btrfs_root *root,
167 struct btrfs_trans_handle *trans,
168 struct btrfs_block_group_cache *block_group,
169 struct btrfs_path *path)
170{
171 int ret;
172 u64 ino;
173
174 ret = btrfs_find_free_objectid(root, &ino);
175 if (ret < 0)
176 return ret;
177
178 return __create_free_space_inode(root, trans, path, ino,
179 block_group->key.objectid);
180}
181
160int btrfs_truncate_free_space_cache(struct btrfs_root *root, 182int btrfs_truncate_free_space_cache(struct btrfs_root *root,
161 struct btrfs_trans_handle *trans, 183 struct btrfs_trans_handle *trans,
162 struct btrfs_path *path, 184 struct btrfs_path *path,
@@ -187,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
187 return ret; 209 return ret;
188 } 210 }
189 211
190 return btrfs_update_inode(trans, root, inode); 212 ret = btrfs_update_inode(trans, root, inode);
213 return ret;
191} 214}
192 215
193static int readahead_cache(struct inode *inode) 216static int readahead_cache(struct inode *inode)
@@ -209,15 +232,13 @@ static int readahead_cache(struct inode *inode)
209 return 0; 232 return 0;
210} 233}
211 234
212int load_free_space_cache(struct btrfs_fs_info *fs_info, 235int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
213 struct btrfs_block_group_cache *block_group) 236 struct btrfs_free_space_ctl *ctl,
237 struct btrfs_path *path, u64 offset)
214{ 238{
215 struct btrfs_root *root = fs_info->tree_root;
216 struct inode *inode;
217 struct btrfs_free_space_header *header; 239 struct btrfs_free_space_header *header;
218 struct extent_buffer *leaf; 240 struct extent_buffer *leaf;
219 struct page *page; 241 struct page *page;
220 struct btrfs_path *path;
221 u32 *checksums = NULL, *crc; 242 u32 *checksums = NULL, *crc;
222 char *disk_crcs = NULL; 243 char *disk_crcs = NULL;
223 struct btrfs_key key; 244 struct btrfs_key key;
@@ -225,76 +246,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
225 u64 num_entries; 246 u64 num_entries;
226 u64 num_bitmaps; 247 u64 num_bitmaps;
227 u64 generation; 248 u64 generation;
228 u64 used = btrfs_block_group_used(&block_group->item);
229 u32 cur_crc = ~(u32)0; 249 u32 cur_crc = ~(u32)0;
230 pgoff_t index = 0; 250 pgoff_t index = 0;
231 unsigned long first_page_offset; 251 unsigned long first_page_offset;
232 int num_checksums; 252 int num_checksums;
233 int ret = 0; 253 int ret = 0, ret2;
234
235 /*
236 * If we're unmounting then just return, since this does a search on the
237 * normal root and not the commit root and we could deadlock.
238 */
239 smp_mb();
240 if (fs_info->closing)
241 return 0;
242
243 /*
244 * If this block group has been marked to be cleared for one reason or
245 * another then we can't trust the on disk cache, so just return.
246 */
247 spin_lock(&block_group->lock);
248 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
249 spin_unlock(&block_group->lock);
250 return 0;
251 }
252 spin_unlock(&block_group->lock);
253 254
254 INIT_LIST_HEAD(&bitmaps); 255 INIT_LIST_HEAD(&bitmaps);
255 256
256 path = btrfs_alloc_path();
257 if (!path)
258 return 0;
259
260 inode = lookup_free_space_inode(root, block_group, path);
261 if (IS_ERR(inode)) {
262 btrfs_free_path(path);
263 return 0;
264 }
265
266 /* Nothing in the space cache, goodbye */ 257 /* Nothing in the space cache, goodbye */
267 if (!i_size_read(inode)) { 258 if (!i_size_read(inode))
268 btrfs_free_path(path);
269 goto out; 259 goto out;
270 }
271 260
272 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 261 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
273 key.offset = block_group->key.objectid; 262 key.offset = offset;
274 key.type = 0; 263 key.type = 0;
275 264
276 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 265 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
277 if (ret) { 266 if (ret < 0)
278 btrfs_free_path(path); 267 goto out;
268 else if (ret > 0) {
269 btrfs_release_path(path);
270 ret = 0;
279 goto out; 271 goto out;
280 } 272 }
281 273
274 ret = -1;
275
282 leaf = path->nodes[0]; 276 leaf = path->nodes[0];
283 header = btrfs_item_ptr(leaf, path->slots[0], 277 header = btrfs_item_ptr(leaf, path->slots[0],
284 struct btrfs_free_space_header); 278 struct btrfs_free_space_header);
285 num_entries = btrfs_free_space_entries(leaf, header); 279 num_entries = btrfs_free_space_entries(leaf, header);
286 num_bitmaps = btrfs_free_space_bitmaps(leaf, header); 280 num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
287 generation = btrfs_free_space_generation(leaf, header); 281 generation = btrfs_free_space_generation(leaf, header);
288 btrfs_free_path(path); 282 btrfs_release_path(path);
289 283
290 if (BTRFS_I(inode)->generation != generation) { 284 if (BTRFS_I(inode)->generation != generation) {
291 printk(KERN_ERR "btrfs: free space inode generation (%llu) did" 285 printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
292 " not match free space cache generation (%llu) for " 286 " not match free space cache generation (%llu)\n",
293 "block group %llu\n",
294 (unsigned long long)BTRFS_I(inode)->generation, 287 (unsigned long long)BTRFS_I(inode)->generation,
295 (unsigned long long)generation, 288 (unsigned long long)generation);
296 (unsigned long long)block_group->key.objectid); 289 goto out;
297 goto free_cache;
298 } 290 }
299 291
300 if (!num_entries) 292 if (!num_entries)
@@ -311,10 +303,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
311 goto out; 303 goto out;
312 304
313 ret = readahead_cache(inode); 305 ret = readahead_cache(inode);
314 if (ret) { 306 if (ret)
315 ret = 0;
316 goto out; 307 goto out;
317 }
318 308
319 while (1) { 309 while (1) {
320 struct btrfs_free_space_entry *entry; 310 struct btrfs_free_space_entry *entry;
@@ -333,10 +323,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
333 } 323 }
334 324
335 page = grab_cache_page(inode->i_mapping, index); 325 page = grab_cache_page(inode->i_mapping, index);
336 if (!page) { 326 if (!page)
337 ret = 0;
338 goto free_cache; 327 goto free_cache;
339 }
340 328
341 if (!PageUptodate(page)) { 329 if (!PageUptodate(page)) {
342 btrfs_readpage(NULL, page); 330 btrfs_readpage(NULL, page);
@@ -345,9 +333,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
345 unlock_page(page); 333 unlock_page(page);
346 page_cache_release(page); 334 page_cache_release(page);
347 printk(KERN_ERR "btrfs: error reading free " 335 printk(KERN_ERR "btrfs: error reading free "
348 "space cache: %llu\n", 336 "space cache\n");
349 (unsigned long long)
350 block_group->key.objectid);
351 goto free_cache; 337 goto free_cache;
352 } 338 }
353 } 339 }
@@ -360,13 +346,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
360 gen = addr + (sizeof(u32) * num_checksums); 346 gen = addr + (sizeof(u32) * num_checksums);
361 if (*gen != BTRFS_I(inode)->generation) { 347 if (*gen != BTRFS_I(inode)->generation) {
362 printk(KERN_ERR "btrfs: space cache generation" 348 printk(KERN_ERR "btrfs: space cache generation"
363 " (%llu) does not match inode (%llu) " 349 " (%llu) does not match inode (%llu)\n",
364 "for block group %llu\n",
365 (unsigned long long)*gen, 350 (unsigned long long)*gen,
366 (unsigned long long) 351 (unsigned long long)
367 BTRFS_I(inode)->generation, 352 BTRFS_I(inode)->generation);
368 (unsigned long long)
369 block_group->key.objectid);
370 kunmap(page); 353 kunmap(page);
371 unlock_page(page); 354 unlock_page(page);
372 page_cache_release(page); 355 page_cache_release(page);
@@ -382,9 +365,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
382 PAGE_CACHE_SIZE - start_offset); 365 PAGE_CACHE_SIZE - start_offset);
383 btrfs_csum_final(cur_crc, (char *)&cur_crc); 366 btrfs_csum_final(cur_crc, (char *)&cur_crc);
384 if (cur_crc != *crc) { 367 if (cur_crc != *crc) {
385 printk(KERN_ERR "btrfs: crc mismatch for page %lu in " 368 printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
386 "block group %llu\n", index, 369 index);
387 (unsigned long long)block_group->key.objectid);
388 kunmap(page); 370 kunmap(page);
389 unlock_page(page); 371 unlock_page(page);
390 page_cache_release(page); 372 page_cache_release(page);
@@ -417,9 +399,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
417 } 399 }
418 400
419 if (entry->type == BTRFS_FREE_SPACE_EXTENT) { 401 if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
420 spin_lock(&block_group->tree_lock); 402 spin_lock(&ctl->tree_lock);
421 ret = link_free_space(block_group, e); 403 ret = link_free_space(ctl, e);
422 spin_unlock(&block_group->tree_lock); 404 spin_unlock(&ctl->tree_lock);
423 BUG_ON(ret); 405 BUG_ON(ret);
424 } else { 406 } else {
425 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 407 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
@@ -431,11 +413,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
431 page_cache_release(page); 413 page_cache_release(page);
432 goto free_cache; 414 goto free_cache;
433 } 415 }
434 spin_lock(&block_group->tree_lock); 416 spin_lock(&ctl->tree_lock);
435 ret = link_free_space(block_group, e); 417 ret2 = link_free_space(ctl, e);
436 block_group->total_bitmaps++; 418 ctl->total_bitmaps++;
437 recalculate_thresholds(block_group); 419 ctl->op->recalc_thresholds(ctl);
438 spin_unlock(&block_group->tree_lock); 420 spin_unlock(&ctl->tree_lock);
439 list_add_tail(&e->list, &bitmaps); 421 list_add_tail(&e->list, &bitmaps);
440 } 422 }
441 423
@@ -471,41 +453,97 @@ next:
471 index++; 453 index++;
472 } 454 }
473 455
474 spin_lock(&block_group->tree_lock);
475 if (block_group->free_space != (block_group->key.offset - used -
476 block_group->bytes_super)) {
477 spin_unlock(&block_group->tree_lock);
478 printk(KERN_ERR "block group %llu has an wrong amount of free "
479 "space\n", block_group->key.objectid);
480 ret = 0;
481 goto free_cache;
482 }
483 spin_unlock(&block_group->tree_lock);
484
485 ret = 1; 456 ret = 1;
486out: 457out:
487 kfree(checksums); 458 kfree(checksums);
488 kfree(disk_crcs); 459 kfree(disk_crcs);
489 iput(inode);
490 return ret; 460 return ret;
491
492free_cache: 461free_cache:
493 /* This cache is bogus, make sure it gets cleared */ 462 __btrfs_remove_free_space_cache(ctl);
463 goto out;
464}
465
466int load_free_space_cache(struct btrfs_fs_info *fs_info,
467 struct btrfs_block_group_cache *block_group)
468{
469 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
470 struct btrfs_root *root = fs_info->tree_root;
471 struct inode *inode;
472 struct btrfs_path *path;
473 int ret;
474 bool matched;
475 u64 used = btrfs_block_group_used(&block_group->item);
476
477 /*
478 * If we're unmounting then just return, since this does a search on the
479 * normal root and not the commit root and we could deadlock.
480 */
481 smp_mb();
482 if (fs_info->closing)
483 return 0;
484
485 /*
486 * If this block group has been marked to be cleared for one reason or
487 * another then we can't trust the on disk cache, so just return.
488 */
494 spin_lock(&block_group->lock); 489 spin_lock(&block_group->lock);
495 block_group->disk_cache_state = BTRFS_DC_CLEAR; 490 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
491 spin_unlock(&block_group->lock);
492 return 0;
493 }
496 spin_unlock(&block_group->lock); 494 spin_unlock(&block_group->lock);
497 btrfs_remove_free_space_cache(block_group); 495
498 goto out; 496 path = btrfs_alloc_path();
497 if (!path)
498 return 0;
499
500 inode = lookup_free_space_inode(root, block_group, path);
501 if (IS_ERR(inode)) {
502 btrfs_free_path(path);
503 return 0;
504 }
505
506 ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
507 path, block_group->key.objectid);
508 btrfs_free_path(path);
509 if (ret <= 0)
510 goto out;
511
512 spin_lock(&ctl->tree_lock);
513 matched = (ctl->free_space == (block_group->key.offset - used -
514 block_group->bytes_super));
515 spin_unlock(&ctl->tree_lock);
516
517 if (!matched) {
518 __btrfs_remove_free_space_cache(ctl);
519		printk(KERN_ERR "block group %llu has a wrong amount of free "
520 "space\n", block_group->key.objectid);
521 ret = -1;
522 }
523out:
524 if (ret < 0) {
525 /* This cache is bogus, make sure it gets cleared */
526 spin_lock(&block_group->lock);
527 block_group->disk_cache_state = BTRFS_DC_CLEAR;
528 spin_unlock(&block_group->lock);
529 ret = 0;
530
531 printk(KERN_ERR "btrfs: failed to load free space cache "
532 "for block group %llu\n", block_group->key.objectid);
533 }
534
535 iput(inode);
536 return ret;
499} 537}
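
Spelled out, the consistency check load_free_space_cache() applies after a successful __load_free_space_cache() is the block group's space accounting identity, restating only the fields used above:

	ctl->free_space == block_group->key.offset - used - block_group->bytes_super

If the totals disagree, the loaded entries are thrown away with __btrfs_remove_free_space_cache(), the group is flagged BTRFS_DC_CLEAR so the cache file gets rewritten later, and ret is forced back to 0 so callers simply treat the cache as absent.
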
500 538
501int btrfs_write_out_cache(struct btrfs_root *root, 539int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
502 struct btrfs_trans_handle *trans, 540 struct btrfs_free_space_ctl *ctl,
503 struct btrfs_block_group_cache *block_group, 541 struct btrfs_block_group_cache *block_group,
504 struct btrfs_path *path) 542 struct btrfs_trans_handle *trans,
543 struct btrfs_path *path, u64 offset)
505{ 544{
506 struct btrfs_free_space_header *header; 545 struct btrfs_free_space_header *header;
507 struct extent_buffer *leaf; 546 struct extent_buffer *leaf;
508 struct inode *inode;
509 struct rb_node *node; 547 struct rb_node *node;
510 struct list_head *pos, *n; 548 struct list_head *pos, *n;
511 struct page **pages; 549 struct page **pages;
@@ -522,35 +560,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
522 int index = 0, num_pages = 0; 560 int index = 0, num_pages = 0;
523 int entries = 0; 561 int entries = 0;
524 int bitmaps = 0; 562 int bitmaps = 0;
525 int ret = 0; 563 int ret = -1;
526 bool next_page = false; 564 bool next_page = false;
527 bool out_of_space = false; 565 bool out_of_space = false;
528 566
529 root = root->fs_info->tree_root;
530
531 INIT_LIST_HEAD(&bitmap_list); 567 INIT_LIST_HEAD(&bitmap_list);
532 568
533 spin_lock(&block_group->lock); 569 node = rb_first(&ctl->free_space_offset);
534 if (block_group->disk_cache_state < BTRFS_DC_SETUP) { 570 if (!node)
535 spin_unlock(&block_group->lock);
536 return 0;
537 }
538 spin_unlock(&block_group->lock);
539
540 inode = lookup_free_space_inode(root, block_group, path);
541 if (IS_ERR(inode))
542 return 0;
543
544 if (!i_size_read(inode)) {
545 iput(inode);
546 return 0; 571 return 0;
547 }
548 572
549 node = rb_first(&block_group->free_space_offset); 573 if (!i_size_read(inode))
550 if (!node) { 574 return -1;
551 iput(inode);
552 return 0;
553 }
554 575
555 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 576 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
556 PAGE_CACHE_SHIFT; 577 PAGE_CACHE_SHIFT;
@@ -560,16 +581,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
560 581
561 /* We need a checksum per page. */ 582 /* We need a checksum per page. */
562 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); 583 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
563 if (!crc) { 584 if (!crc)
564 iput(inode); 585 return -1;
565 return 0;
566 }
567 586
568 pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); 587 pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
569 if (!pages) { 588 if (!pages) {
570 kfree(crc); 589 kfree(crc);
571 iput(inode); 590 return -1;
572 return 0;
573 } 591 }
574 592
575 /* Since the first page has all of our checksums and our generation we 593 /* Since the first page has all of our checksums and our generation we
@@ -579,7 +597,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
579 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); 597 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
580 598
581 /* Get the cluster for this block_group if it exists */ 599 /* Get the cluster for this block_group if it exists */
582 if (!list_empty(&block_group->cluster_list)) 600 if (block_group && !list_empty(&block_group->cluster_list))
583 cluster = list_entry(block_group->cluster_list.next, 601 cluster = list_entry(block_group->cluster_list.next,
584 struct btrfs_free_cluster, 602 struct btrfs_free_cluster,
585 block_group_list); 603 block_group_list);
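
The first_page_offset line in this hunk encodes the cache file's layout: page 0 reserves one u32 checksum per page plus a u64 generation before the entries begin. The same math as a throwaway helper, illustrative only and not part of the patch:

/* Offset on page 0 at which the free space entries start. */
static unsigned long demo_first_page_offset(int num_pages)
{
	return (sizeof(u32) * num_pages) + sizeof(u64);	/* crcs + generation */
}
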
@@ -621,7 +639,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
621 * When searching for pinned extents, we need to start at our start 639 * When searching for pinned extents, we need to start at our start
622 * offset. 640 * offset.
623 */ 641 */
624 start = block_group->key.objectid; 642 if (block_group)
643 start = block_group->key.objectid;
625 644
626 /* Write out the extent entries */ 645 /* Write out the extent entries */
627 do { 646 do {
@@ -679,8 +698,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
679 * We want to add any pinned extents to our free space cache 698 * We want to add any pinned extents to our free space cache
680 * so we don't leak the space 699 * so we don't leak the space
681 */ 700 */
682 while (!next_page && (start < block_group->key.objectid + 701 while (block_group && !next_page &&
683 block_group->key.offset)) { 702 (start < block_group->key.objectid +
703 block_group->key.offset)) {
684 ret = find_first_extent_bit(unpin, start, &start, &end, 704 ret = find_first_extent_bit(unpin, start, &start, &end,
685 EXTENT_DIRTY); 705 EXTENT_DIRTY);
686 if (ret) { 706 if (ret) {
@@ -798,12 +818,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
798 filemap_write_and_wait(inode->i_mapping); 818 filemap_write_and_wait(inode->i_mapping);
799 819
800 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 820 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
801 key.offset = block_group->key.objectid; 821 key.offset = offset;
802 key.type = 0; 822 key.type = 0;
803 823
804 ret = btrfs_search_slot(trans, root, &key, path, 1, 1); 824 ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
805 if (ret < 0) { 825 if (ret < 0) {
806 ret = 0; 826 ret = -1;
807 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, 827 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
808 EXTENT_DIRTY | EXTENT_DELALLOC | 828 EXTENT_DIRTY | EXTENT_DELALLOC |
809 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); 829 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
@@ -816,13 +836,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
816 path->slots[0]--; 836 path->slots[0]--;
817 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 837 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
818 if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || 838 if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
819 found_key.offset != block_group->key.objectid) { 839 found_key.offset != offset) {
820 ret = 0; 840 ret = -1;
821 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, 841 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
822 EXTENT_DIRTY | EXTENT_DELALLOC | 842 EXTENT_DIRTY | EXTENT_DELALLOC |
823 EXTENT_DO_ACCOUNTING, 0, 0, NULL, 843 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
824 GFP_NOFS); 844 GFP_NOFS);
825 btrfs_release_path(root, path); 845 btrfs_release_path(path);
826 goto out_free; 846 goto out_free;
827 } 847 }
828 } 848 }
@@ -832,49 +852,83 @@ int btrfs_write_out_cache(struct btrfs_root *root,
832 btrfs_set_free_space_bitmaps(leaf, header, bitmaps); 852 btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
833 btrfs_set_free_space_generation(leaf, header, trans->transid); 853 btrfs_set_free_space_generation(leaf, header, trans->transid);
834 btrfs_mark_buffer_dirty(leaf); 854 btrfs_mark_buffer_dirty(leaf);
835 btrfs_release_path(root, path); 855 btrfs_release_path(path);
836 856
837 ret = 1; 857 ret = 1;
838 858
839out_free: 859out_free:
840 if (ret == 0) { 860 if (ret != 1) {
841 invalidate_inode_pages2_range(inode->i_mapping, 0, index); 861 invalidate_inode_pages2_range(inode->i_mapping, 0, index);
842 spin_lock(&block_group->lock);
843 block_group->disk_cache_state = BTRFS_DC_ERROR;
844 spin_unlock(&block_group->lock);
845 BTRFS_I(inode)->generation = 0; 862 BTRFS_I(inode)->generation = 0;
846 } 863 }
847 kfree(checksums); 864 kfree(checksums);
848 kfree(pages); 865 kfree(pages);
849 btrfs_update_inode(trans, root, inode); 866 btrfs_update_inode(trans, root, inode);
867 return ret;
868}
869
870int btrfs_write_out_cache(struct btrfs_root *root,
871 struct btrfs_trans_handle *trans,
872 struct btrfs_block_group_cache *block_group,
873 struct btrfs_path *path)
874{
875 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
876 struct inode *inode;
877 int ret = 0;
878
879 root = root->fs_info->tree_root;
880
881 spin_lock(&block_group->lock);
882 if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
883 spin_unlock(&block_group->lock);
884 return 0;
885 }
886 spin_unlock(&block_group->lock);
887
888 inode = lookup_free_space_inode(root, block_group, path);
889 if (IS_ERR(inode))
890 return 0;
891
892 ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
893 path, block_group->key.objectid);
894 if (ret < 0) {
895 spin_lock(&block_group->lock);
896 block_group->disk_cache_state = BTRFS_DC_ERROR;
897 spin_unlock(&block_group->lock);
898 ret = 0;
899
900		printk(KERN_ERR "btrfs: failed to write free space cache "
901 "for block group %llu\n", block_group->key.objectid);
902 }
903
850 iput(inode); 904 iput(inode);
851 return ret; 905 return ret;
852} 906}
853 907
854static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, 908static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
855 u64 offset) 909 u64 offset)
856{ 910{
857 BUG_ON(offset < bitmap_start); 911 BUG_ON(offset < bitmap_start);
858 offset -= bitmap_start; 912 offset -= bitmap_start;
859 return (unsigned long)(div64_u64(offset, sectorsize)); 913 return (unsigned long)(div_u64(offset, unit));
860} 914}
861 915
862static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) 916static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
863{ 917{
864 return (unsigned long)(div64_u64(bytes, sectorsize)); 918 return (unsigned long)(div_u64(bytes, unit));
865} 919}
866 920
867static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, 921static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
868 u64 offset) 922 u64 offset)
869{ 923{
870 u64 bitmap_start; 924 u64 bitmap_start;
871 u64 bytes_per_bitmap; 925 u64 bytes_per_bitmap;
872 926
873 bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; 927 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
874 bitmap_start = offset - block_group->key.objectid; 928 bitmap_start = offset - ctl->start;
875 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); 929 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
876 bitmap_start *= bytes_per_bitmap; 930 bitmap_start *= bytes_per_bitmap;
877 bitmap_start += block_group->key.objectid; 931 bitmap_start += ctl->start;
878 932
879 return bitmap_start; 933 return bitmap_start;
880} 934}
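
A quick worked example for offset_to_bitmap() above, assuming 4 KiB pages (so BITS_PER_BITMAP = 32768) and a 4 KiB unit, i.e. the sectorsize in the block-group case:

	bytes_per_bitmap = BITS_PER_BITMAP * unit = 32768 * 4096 = 128 MiB
	bitmap_start(offset) = start
		+ ((offset - start) / bytes_per_bitmap) * bytes_per_bitmap

so every offset maps to the start of the 128 MiB-aligned bitmap window, measured from ctl->start, that contains it.
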
@@ -932,10 +986,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
932 * offset. 986 * offset.
933 */ 987 */
934static struct btrfs_free_space * 988static struct btrfs_free_space *
935tree_search_offset(struct btrfs_block_group_cache *block_group, 989tree_search_offset(struct btrfs_free_space_ctl *ctl,
936 u64 offset, int bitmap_only, int fuzzy) 990 u64 offset, int bitmap_only, int fuzzy)
937{ 991{
938 struct rb_node *n = block_group->free_space_offset.rb_node; 992 struct rb_node *n = ctl->free_space_offset.rb_node;
939 struct btrfs_free_space *entry, *prev = NULL; 993 struct btrfs_free_space *entry, *prev = NULL;
940 994
941 /* find entry that is closest to the 'offset' */ 995 /* find entry that is closest to the 'offset' */
@@ -1031,8 +1085,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
1031 break; 1085 break;
1032 } 1086 }
1033 } 1087 }
1034 if (entry->offset + BITS_PER_BITMAP * 1088 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
1035 block_group->sectorsize > offset)
1036 return entry; 1089 return entry;
1037 } else if (entry->offset + entry->bytes > offset) 1090 } else if (entry->offset + entry->bytes > offset)
1038 return entry; 1091 return entry;
@@ -1043,7 +1096,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
1043 while (1) { 1096 while (1) {
1044 if (entry->bitmap) { 1097 if (entry->bitmap) {
1045 if (entry->offset + BITS_PER_BITMAP * 1098 if (entry->offset + BITS_PER_BITMAP *
1046 block_group->sectorsize > offset) 1099 ctl->unit > offset)
1047 break; 1100 break;
1048 } else { 1101 } else {
1049 if (entry->offset + entry->bytes > offset) 1102 if (entry->offset + entry->bytes > offset)
@@ -1059,42 +1112,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
1059} 1112}
1060 1113
1061static inline void 1114static inline void
1062__unlink_free_space(struct btrfs_block_group_cache *block_group, 1115__unlink_free_space(struct btrfs_free_space_ctl *ctl,
1063 struct btrfs_free_space *info) 1116 struct btrfs_free_space *info)
1064{ 1117{
1065 rb_erase(&info->offset_index, &block_group->free_space_offset); 1118 rb_erase(&info->offset_index, &ctl->free_space_offset);
1066 block_group->free_extents--; 1119 ctl->free_extents--;
1067} 1120}
1068 1121
1069static void unlink_free_space(struct btrfs_block_group_cache *block_group, 1122static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
1070 struct btrfs_free_space *info) 1123 struct btrfs_free_space *info)
1071{ 1124{
1072 __unlink_free_space(block_group, info); 1125 __unlink_free_space(ctl, info);
1073 block_group->free_space -= info->bytes; 1126 ctl->free_space -= info->bytes;
1074} 1127}
1075 1128
1076static int link_free_space(struct btrfs_block_group_cache *block_group, 1129static int link_free_space(struct btrfs_free_space_ctl *ctl,
1077 struct btrfs_free_space *info) 1130 struct btrfs_free_space *info)
1078{ 1131{
1079 int ret = 0; 1132 int ret = 0;
1080 1133
1081 BUG_ON(!info->bitmap && !info->bytes); 1134 BUG_ON(!info->bitmap && !info->bytes);
1082 ret = tree_insert_offset(&block_group->free_space_offset, info->offset, 1135 ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
1083 &info->offset_index, (info->bitmap != NULL)); 1136 &info->offset_index, (info->bitmap != NULL));
1084 if (ret) 1137 if (ret)
1085 return ret; 1138 return ret;
1086 1139
1087 block_group->free_space += info->bytes; 1140 ctl->free_space += info->bytes;
1088 block_group->free_extents++; 1141 ctl->free_extents++;
1089 return ret; 1142 return ret;
1090} 1143}
1091 1144
1092static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) 1145static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1093{ 1146{
1147 struct btrfs_block_group_cache *block_group = ctl->private;
1094 u64 max_bytes; 1148 u64 max_bytes;
1095 u64 bitmap_bytes; 1149 u64 bitmap_bytes;
1096 u64 extent_bytes; 1150 u64 extent_bytes;
1097 u64 size = block_group->key.offset; 1151 u64 size = block_group->key.offset;
1152 u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
1153 int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
1154
1155 BUG_ON(ctl->total_bitmaps > max_bitmaps);
1098 1156
1099 /* 1157 /*
1100 * The goal is to keep the total amount of memory used per 1gb of space 1158 * The goal is to keep the total amount of memory used per 1gb of space
@@ -1112,10 +1170,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
1112 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as 1170 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
1113 * we add more bitmaps. 1171 * we add more bitmaps.
1114 */ 1172 */
1115 bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE; 1173 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
1116 1174
1117 if (bitmap_bytes >= max_bytes) { 1175 if (bitmap_bytes >= max_bytes) {
1118 block_group->extents_thresh = 0; 1176 ctl->extents_thresh = 0;
1119 return; 1177 return;
1120 } 1178 }
1121 1179
@@ -1126,47 +1184,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
1126 extent_bytes = max_bytes - bitmap_bytes; 1184 extent_bytes = max_bytes - bitmap_bytes;
1127 extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); 1185 extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
1128 1186
1129 block_group->extents_thresh = 1187 ctl->extents_thresh =
1130 div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); 1188 div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
1131} 1189}
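
A rough worked example of the threshold math above, assuming 4 KiB pages (one page per bitmap, MAX_CACHE_BYTES_PER_GIG = 32 KiB of cache metadata per GiB) and a 1 GiB block group that already holds three bitmaps:

	bitmap_bytes   = (3 + 1) * 4096  = 16 KiB
	extent_bytes   = 32 KiB - 16 KiB = 16 KiB   (already <= max_bytes / 2)
	extents_thresh = 16 KiB / sizeof(struct btrfs_free_space)
	               = a few hundred extent entries

Once free_extents climbs past that threshold, use_bitmap() below starts steering new free space into bitmaps rather than individual extent entries.
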
1132 1190
1133static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, 1191static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1134 struct btrfs_free_space *info, u64 offset, 1192 struct btrfs_free_space *info, u64 offset,
1135 u64 bytes) 1193 u64 bytes)
1136{ 1194{
1137 unsigned long start, end; 1195 unsigned long start, count;
1138 unsigned long i;
1139 1196
1140 start = offset_to_bit(info->offset, block_group->sectorsize, offset); 1197 start = offset_to_bit(info->offset, ctl->unit, offset);
1141 end = start + bytes_to_bits(bytes, block_group->sectorsize); 1198 count = bytes_to_bits(bytes, ctl->unit);
1142 BUG_ON(end > BITS_PER_BITMAP); 1199 BUG_ON(start + count > BITS_PER_BITMAP);
1143 1200
1144 for (i = start; i < end; i++) 1201 bitmap_clear(info->bitmap, start, count);
1145 clear_bit(i, info->bitmap);
1146 1202
1147 info->bytes -= bytes; 1203 info->bytes -= bytes;
1148 block_group->free_space -= bytes; 1204 ctl->free_space -= bytes;
1149} 1205}
1150 1206
1151static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, 1207static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
1152 struct btrfs_free_space *info, u64 offset, 1208 struct btrfs_free_space *info, u64 offset,
1153 u64 bytes) 1209 u64 bytes)
1154{ 1210{
1155 unsigned long start, end; 1211 unsigned long start, count;
1156 unsigned long i;
1157 1212
1158 start = offset_to_bit(info->offset, block_group->sectorsize, offset); 1213 start = offset_to_bit(info->offset, ctl->unit, offset);
1159 end = start + bytes_to_bits(bytes, block_group->sectorsize); 1214 count = bytes_to_bits(bytes, ctl->unit);
1160 BUG_ON(end > BITS_PER_BITMAP); 1215 BUG_ON(start + count > BITS_PER_BITMAP);
1161 1216
1162 for (i = start; i < end; i++) 1217 bitmap_set(info->bitmap, start, count);
1163 set_bit(i, info->bitmap);
1164 1218
1165 info->bytes += bytes; 1219 info->bytes += bytes;
1166 block_group->free_space += bytes; 1220 ctl->free_space += bytes;
1167} 1221}
1168 1222
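
The bitmap_clear_bits()/bitmap_set_bits() conversions above swap open-coded clear_bit()/set_bit() loops for the batch helpers from <linux/bitmap.h>, driven by ctl->unit instead of the block group's sectorsize. The same pattern in isolation; demo_mark_free() is a made-up name, while div_u64() and bitmap_set() are the real kernel helpers:

static void demo_mark_free(unsigned long *bitmap, u64 bitmap_start, u32 unit,
			   u64 offset, u64 bytes)
{
	unsigned long start = (unsigned long)div_u64(offset - bitmap_start, unit);
	unsigned long count = (unsigned long)div_u64(bytes, unit);

	bitmap_set(bitmap, start, count);	/* one call instead of a per-bit loop */
}
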
1169static int search_bitmap(struct btrfs_block_group_cache *block_group, 1223static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1170 struct btrfs_free_space *bitmap_info, u64 *offset, 1224 struct btrfs_free_space *bitmap_info, u64 *offset,
1171 u64 *bytes) 1225 u64 *bytes)
1172{ 1226{
@@ -1174,9 +1228,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
1174 unsigned long bits, i; 1228 unsigned long bits, i;
1175 unsigned long next_zero; 1229 unsigned long next_zero;
1176 1230
1177 i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, 1231 i = offset_to_bit(bitmap_info->offset, ctl->unit,
1178 max_t(u64, *offset, bitmap_info->offset)); 1232 max_t(u64, *offset, bitmap_info->offset));
1179 bits = bytes_to_bits(*bytes, block_group->sectorsize); 1233 bits = bytes_to_bits(*bytes, ctl->unit);
1180 1234
1181 for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); 1235 for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
1182 i < BITS_PER_BITMAP; 1236 i < BITS_PER_BITMAP;
@@ -1191,29 +1245,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
1191 } 1245 }
1192 1246
1193 if (found_bits) { 1247 if (found_bits) {
1194 *offset = (u64)(i * block_group->sectorsize) + 1248 *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
1195 bitmap_info->offset; 1249 *bytes = (u64)(found_bits) * ctl->unit;
1196 *bytes = (u64)(found_bits) * block_group->sectorsize;
1197 return 0; 1250 return 0;
1198 } 1251 }
1199 1252
1200 return -1; 1253 return -1;
1201} 1254}
1202 1255
1203static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache 1256static struct btrfs_free_space *
1204 *block_group, u64 *offset, 1257find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
1205 u64 *bytes, int debug)
1206{ 1258{
1207 struct btrfs_free_space *entry; 1259 struct btrfs_free_space *entry;
1208 struct rb_node *node; 1260 struct rb_node *node;
1209 int ret; 1261 int ret;
1210 1262
1211 if (!block_group->free_space_offset.rb_node) 1263 if (!ctl->free_space_offset.rb_node)
1212 return NULL; 1264 return NULL;
1213 1265
1214 entry = tree_search_offset(block_group, 1266 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
1215 offset_to_bitmap(block_group, *offset),
1216 0, 1);
1217 if (!entry) 1267 if (!entry)
1218 return NULL; 1268 return NULL;
1219 1269
@@ -1223,7 +1273,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
1223 continue; 1273 continue;
1224 1274
1225 if (entry->bitmap) { 1275 if (entry->bitmap) {
1226 ret = search_bitmap(block_group, entry, offset, bytes); 1276 ret = search_bitmap(ctl, entry, offset, bytes);
1227 if (!ret) 1277 if (!ret)
1228 return entry; 1278 return entry;
1229 continue; 1279 continue;
@@ -1237,33 +1287,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
1237 return NULL; 1287 return NULL;
1238} 1288}
1239 1289
1240static void add_new_bitmap(struct btrfs_block_group_cache *block_group, 1290static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1241 struct btrfs_free_space *info, u64 offset) 1291 struct btrfs_free_space *info, u64 offset)
1242{ 1292{
1243 u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; 1293 info->offset = offset_to_bitmap(ctl, offset);
1244 int max_bitmaps = (int)div64_u64(block_group->key.offset +
1245 bytes_per_bg - 1, bytes_per_bg);
1246 BUG_ON(block_group->total_bitmaps >= max_bitmaps);
1247
1248 info->offset = offset_to_bitmap(block_group, offset);
1249 info->bytes = 0; 1294 info->bytes = 0;
1250 link_free_space(block_group, info); 1295 link_free_space(ctl, info);
1251 block_group->total_bitmaps++; 1296 ctl->total_bitmaps++;
1252 1297
1253 recalculate_thresholds(block_group); 1298 ctl->op->recalc_thresholds(ctl);
1254} 1299}
1255 1300
1256static void free_bitmap(struct btrfs_block_group_cache *block_group, 1301static void free_bitmap(struct btrfs_free_space_ctl *ctl,
1257 struct btrfs_free_space *bitmap_info) 1302 struct btrfs_free_space *bitmap_info)
1258{ 1303{
1259 unlink_free_space(block_group, bitmap_info); 1304 unlink_free_space(ctl, bitmap_info);
1260 kfree(bitmap_info->bitmap); 1305 kfree(bitmap_info->bitmap);
1261 kmem_cache_free(btrfs_free_space_cachep, bitmap_info); 1306 kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1262 block_group->total_bitmaps--; 1307 ctl->total_bitmaps--;
1263 recalculate_thresholds(block_group); 1308 ctl->op->recalc_thresholds(ctl);
1264} 1309}
1265 1310
1266static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, 1311static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
1267 struct btrfs_free_space *bitmap_info, 1312 struct btrfs_free_space *bitmap_info,
1268 u64 *offset, u64 *bytes) 1313 u64 *offset, u64 *bytes)
1269{ 1314{
@@ -1272,8 +1317,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
1272 int ret; 1317 int ret;
1273 1318
1274again: 1319again:
1275 end = bitmap_info->offset + 1320 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
1276 (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
1277 1321
1278 /* 1322 /*
1279 * XXX - this can go away after a few releases. 1323 * XXX - this can go away after a few releases.
@@ -1288,24 +1332,22 @@ again:
1288 search_start = *offset; 1332 search_start = *offset;
1289 search_bytes = *bytes; 1333 search_bytes = *bytes;
1290 search_bytes = min(search_bytes, end - search_start + 1); 1334 search_bytes = min(search_bytes, end - search_start + 1);
1291 ret = search_bitmap(block_group, bitmap_info, &search_start, 1335 ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
1292 &search_bytes);
1293 BUG_ON(ret < 0 || search_start != *offset); 1336 BUG_ON(ret < 0 || search_start != *offset);
1294 1337
1295 if (*offset > bitmap_info->offset && *offset + *bytes > end) { 1338 if (*offset > bitmap_info->offset && *offset + *bytes > end) {
1296 bitmap_clear_bits(block_group, bitmap_info, *offset, 1339 bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
1297 end - *offset + 1);
1298 *bytes -= end - *offset + 1; 1340 *bytes -= end - *offset + 1;
1299 *offset = end + 1; 1341 *offset = end + 1;
1300 } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { 1342 } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
1301 bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); 1343 bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
1302 *bytes = 0; 1344 *bytes = 0;
1303 } 1345 }
1304 1346
1305 if (*bytes) { 1347 if (*bytes) {
1306 struct rb_node *next = rb_next(&bitmap_info->offset_index); 1348 struct rb_node *next = rb_next(&bitmap_info->offset_index);
1307 if (!bitmap_info->bytes) 1349 if (!bitmap_info->bytes)
1308 free_bitmap(block_group, bitmap_info); 1350 free_bitmap(ctl, bitmap_info);
1309 1351
1310 /* 1352 /*
1311 * no entry after this bitmap, but we still have bytes to 1353 * no entry after this bitmap, but we still have bytes to
@@ -1332,31 +1374,28 @@ again:
1332 */ 1374 */
1333 search_start = *offset; 1375 search_start = *offset;
1334 search_bytes = *bytes; 1376 search_bytes = *bytes;
1335 ret = search_bitmap(block_group, bitmap_info, &search_start, 1377 ret = search_bitmap(ctl, bitmap_info, &search_start,
1336 &search_bytes); 1378 &search_bytes);
1337 if (ret < 0 || search_start != *offset) 1379 if (ret < 0 || search_start != *offset)
1338 return -EAGAIN; 1380 return -EAGAIN;
1339 1381
1340 goto again; 1382 goto again;
1341 } else if (!bitmap_info->bytes) 1383 } else if (!bitmap_info->bytes)
1342 free_bitmap(block_group, bitmap_info); 1384 free_bitmap(ctl, bitmap_info);
1343 1385
1344 return 0; 1386 return 0;
1345} 1387}
1346 1388
1347static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, 1389static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1348 struct btrfs_free_space *info) 1390 struct btrfs_free_space *info)
1349{ 1391{
1350 struct btrfs_free_space *bitmap_info; 1392 struct btrfs_block_group_cache *block_group = ctl->private;
1351 int added = 0;
1352 u64 bytes, offset, end;
1353 int ret;
1354 1393
1355 /* 1394 /*
1356 * If we are below the extents threshold then we can add this as an 1395 * If we are below the extents threshold then we can add this as an
1357 * extent, and don't have to deal with the bitmap 1396 * extent, and don't have to deal with the bitmap
1358 */ 1397 */
1359 if (block_group->free_extents < block_group->extents_thresh) { 1398 if (ctl->free_extents < ctl->extents_thresh) {
1360 /* 1399 /*
1361 * If this block group has some small extents we don't want to 1400 * If this block group has some small extents we don't want to
1362 * use up all of our free slots in the cache with them, we want 1401 * use up all of our free slots in the cache with them, we want
@@ -1365,11 +1404,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
1365 * the overhead of a bitmap if we don't have to. 1404 * the overhead of a bitmap if we don't have to.
1366 */ 1405 */
1367 if (info->bytes <= block_group->sectorsize * 4) { 1406 if (info->bytes <= block_group->sectorsize * 4) {
1368 if (block_group->free_extents * 2 <= 1407 if (ctl->free_extents * 2 <= ctl->extents_thresh)
1369 block_group->extents_thresh) 1408 return false;
1370 return 0;
1371 } else { 1409 } else {
1372 return 0; 1410 return false;
1373 } 1411 }
1374 } 1412 }
1375 1413
@@ -1379,31 +1417,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
1379 */ 1417 */
1380 if (BITS_PER_BITMAP * block_group->sectorsize > 1418 if (BITS_PER_BITMAP * block_group->sectorsize >
1381 block_group->key.offset) 1419 block_group->key.offset)
1382 return 0; 1420 return false;
1421
1422 return true;
1423}
1424
1425static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1426 struct btrfs_free_space *info)
1427{
1428 struct btrfs_free_space *bitmap_info;
1429 int added = 0;
1430 u64 bytes, offset, end;
1431 int ret;
1383 1432
1384 bytes = info->bytes; 1433 bytes = info->bytes;
1385 offset = info->offset; 1434 offset = info->offset;
1386 1435
1436 if (!ctl->op->use_bitmap(ctl, info))
1437 return 0;
1438
1387again: 1439again:
1388 bitmap_info = tree_search_offset(block_group, 1440 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1389 offset_to_bitmap(block_group, offset),
1390 1, 0); 1441 1, 0);
1391 if (!bitmap_info) { 1442 if (!bitmap_info) {
1392 BUG_ON(added); 1443 BUG_ON(added);
1393 goto new_bitmap; 1444 goto new_bitmap;
1394 } 1445 }
1395 1446
1396 end = bitmap_info->offset + 1447 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1397 (u64)(BITS_PER_BITMAP * block_group->sectorsize);
1398 1448
1399 if (offset >= bitmap_info->offset && offset + bytes > end) { 1449 if (offset >= bitmap_info->offset && offset + bytes > end) {
1400 bitmap_set_bits(block_group, bitmap_info, offset, 1450 bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
1401 end - offset);
1402 bytes -= end - offset; 1451 bytes -= end - offset;
1403 offset = end; 1452 offset = end;
1404 added = 0; 1453 added = 0;
1405 } else if (offset >= bitmap_info->offset && offset + bytes <= end) { 1454 } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
1406 bitmap_set_bits(block_group, bitmap_info, offset, bytes); 1455 bitmap_set_bits(ctl, bitmap_info, offset, bytes);
1407 bytes = 0; 1456 bytes = 0;
1408 } else { 1457 } else {
1409 BUG(); 1458 BUG();
@@ -1417,19 +1466,19 @@ again:
1417 1466
1418new_bitmap: 1467new_bitmap:
1419 if (info && info->bitmap) { 1468 if (info && info->bitmap) {
1420 add_new_bitmap(block_group, info, offset); 1469 add_new_bitmap(ctl, info, offset);
1421 added = 1; 1470 added = 1;
1422 info = NULL; 1471 info = NULL;
1423 goto again; 1472 goto again;
1424 } else { 1473 } else {
1425 spin_unlock(&block_group->tree_lock); 1474 spin_unlock(&ctl->tree_lock);
1426 1475
1427 /* no pre-allocated info, allocate a new one */ 1476 /* no pre-allocated info, allocate a new one */
1428 if (!info) { 1477 if (!info) {
1429 info = kmem_cache_zalloc(btrfs_free_space_cachep, 1478 info = kmem_cache_zalloc(btrfs_free_space_cachep,
1430 GFP_NOFS); 1479 GFP_NOFS);
1431 if (!info) { 1480 if (!info) {
1432 spin_lock(&block_group->tree_lock); 1481 spin_lock(&ctl->tree_lock);
1433 ret = -ENOMEM; 1482 ret = -ENOMEM;
1434 goto out; 1483 goto out;
1435 } 1484 }
@@ -1437,7 +1486,7 @@ new_bitmap:
1437 1486
1438 /* allocate the bitmap */ 1487 /* allocate the bitmap */
1439 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 1488 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
1440 spin_lock(&block_group->tree_lock); 1489 spin_lock(&ctl->tree_lock);
1441 if (!info->bitmap) { 1490 if (!info->bitmap) {
1442 ret = -ENOMEM; 1491 ret = -ENOMEM;
1443 goto out; 1492 goto out;
@@ -1455,7 +1504,7 @@ out:
1455 return ret; 1504 return ret;
1456} 1505}
1457 1506
1458bool try_merge_free_space(struct btrfs_block_group_cache *block_group, 1507static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
1459 struct btrfs_free_space *info, bool update_stat) 1508 struct btrfs_free_space *info, bool update_stat)
1460{ 1509{
1461 struct btrfs_free_space *left_info; 1510 struct btrfs_free_space *left_info;
@@ -1469,18 +1518,18 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
1469 * are adding, if there is remove that struct and add a new one to 1518 * are adding, if there is remove that struct and add a new one to
1470 * cover the entire range 1519 * cover the entire range
1471 */ 1520 */
1472 right_info = tree_search_offset(block_group, offset + bytes, 0, 0); 1521 right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
1473 if (right_info && rb_prev(&right_info->offset_index)) 1522 if (right_info && rb_prev(&right_info->offset_index))
1474 left_info = rb_entry(rb_prev(&right_info->offset_index), 1523 left_info = rb_entry(rb_prev(&right_info->offset_index),
1475 struct btrfs_free_space, offset_index); 1524 struct btrfs_free_space, offset_index);
1476 else 1525 else
1477 left_info = tree_search_offset(block_group, offset - 1, 0, 0); 1526 left_info = tree_search_offset(ctl, offset - 1, 0, 0);
1478 1527
1479 if (right_info && !right_info->bitmap) { 1528 if (right_info && !right_info->bitmap) {
1480 if (update_stat) 1529 if (update_stat)
1481 unlink_free_space(block_group, right_info); 1530 unlink_free_space(ctl, right_info);
1482 else 1531 else
1483 __unlink_free_space(block_group, right_info); 1532 __unlink_free_space(ctl, right_info);
1484 info->bytes += right_info->bytes; 1533 info->bytes += right_info->bytes;
1485 kmem_cache_free(btrfs_free_space_cachep, right_info); 1534 kmem_cache_free(btrfs_free_space_cachep, right_info);
1486 merged = true; 1535 merged = true;
@@ -1489,9 +1538,9 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
1489 if (left_info && !left_info->bitmap && 1538 if (left_info && !left_info->bitmap &&
1490 left_info->offset + left_info->bytes == offset) { 1539 left_info->offset + left_info->bytes == offset) {
1491 if (update_stat) 1540 if (update_stat)
1492 unlink_free_space(block_group, left_info); 1541 unlink_free_space(ctl, left_info);
1493 else 1542 else
1494 __unlink_free_space(block_group, left_info); 1543 __unlink_free_space(ctl, left_info);
1495 info->offset = left_info->offset; 1544 info->offset = left_info->offset;
1496 info->bytes += left_info->bytes; 1545 info->bytes += left_info->bytes;
1497 kmem_cache_free(btrfs_free_space_cachep, left_info); 1546 kmem_cache_free(btrfs_free_space_cachep, left_info);
@@ -1501,8 +1550,8 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
1501 return merged; 1550 return merged;
1502} 1551}
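
A hypothetical run of the merge above, with numbers chosen purely for illustration:

/*
 * The tree holds the extents {offset = 50, bytes = 50} and
 * {offset = 150, bytes = 30}, and we add {offset = 100, bytes = 50}.
 *
 *   right_info = {150, 30}   (starts exactly at offset + bytes == 150)
 *   left_info  = {50, 50}    (ends exactly at offset == 100)
 *
 * Both neighbours are unlinked and folded into the new entry, which
 * becomes {offset = 50, bytes = 130}; bitmap entries are never merged
 * this way, only plain extents.
 */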
1503 1552
1504int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, 1553int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
1505 u64 offset, u64 bytes) 1554 u64 offset, u64 bytes)
1506{ 1555{
1507 struct btrfs_free_space *info; 1556 struct btrfs_free_space *info;
1508 int ret = 0; 1557 int ret = 0;
@@ -1514,9 +1563,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
1514 info->offset = offset; 1563 info->offset = offset;
1515 info->bytes = bytes; 1564 info->bytes = bytes;
1516 1565
1517 spin_lock(&block_group->tree_lock); 1566 spin_lock(&ctl->tree_lock);
1518 1567
1519 if (try_merge_free_space(block_group, info, true)) 1568 if (try_merge_free_space(ctl, info, true))
1520 goto link; 1569 goto link;
1521 1570
1522 /* 1571 /*
@@ -1524,7 +1573,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
1524 * extent then we know we're going to have to allocate a new extent, so 1573 * extent then we know we're going to have to allocate a new extent, so
1525 * before we do that see if we need to drop this into a bitmap 1574 * before we do that see if we need to drop this into a bitmap
1526 */ 1575 */
1527 ret = insert_into_bitmap(block_group, info); 1576 ret = insert_into_bitmap(ctl, info);
1528 if (ret < 0) { 1577 if (ret < 0) {
1529 goto out; 1578 goto out;
1530 } else if (ret) { 1579 } else if (ret) {
@@ -1532,11 +1581,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
1532 goto out; 1581 goto out;
1533 } 1582 }
1534link: 1583link:
1535 ret = link_free_space(block_group, info); 1584 ret = link_free_space(ctl, info);
1536 if (ret) 1585 if (ret)
1537 kmem_cache_free(btrfs_free_space_cachep, info); 1586 kmem_cache_free(btrfs_free_space_cachep, info);
1538out: 1587out:
1539 spin_unlock(&block_group->tree_lock); 1588 spin_unlock(&ctl->tree_lock);
1540 1589
1541 if (ret) { 1590 if (ret) {
1542 printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); 1591 printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
@@ -1549,21 +1598,21 @@ out:
1549int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, 1598int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
1550 u64 offset, u64 bytes) 1599 u64 offset, u64 bytes)
1551{ 1600{
1601 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1552 struct btrfs_free_space *info; 1602 struct btrfs_free_space *info;
1553 struct btrfs_free_space *next_info = NULL; 1603 struct btrfs_free_space *next_info = NULL;
1554 int ret = 0; 1604 int ret = 0;
1555 1605
1556 spin_lock(&block_group->tree_lock); 1606 spin_lock(&ctl->tree_lock);
1557 1607
1558again: 1608again:
1559 info = tree_search_offset(block_group, offset, 0, 0); 1609 info = tree_search_offset(ctl, offset, 0, 0);
1560 if (!info) { 1610 if (!info) {
1561 /* 1611 /*
1562 * oops didn't find an extent that matched the space we wanted 1612 * oops didn't find an extent that matched the space we wanted
1563 * to remove, look for a bitmap instead 1613 * to remove, look for a bitmap instead
1564 */ 1614 */
1565 info = tree_search_offset(block_group, 1615 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1566 offset_to_bitmap(block_group, offset),
1567 1, 0); 1616 1, 0);
1568 if (!info) { 1617 if (!info) {
1569 WARN_ON(1); 1618 WARN_ON(1);
@@ -1578,8 +1627,8 @@ again:
1578 offset_index); 1627 offset_index);
1579 1628
1580 if (next_info->bitmap) 1629 if (next_info->bitmap)
1581 end = next_info->offset + BITS_PER_BITMAP * 1630 end = next_info->offset +
1582 block_group->sectorsize - 1; 1631 BITS_PER_BITMAP * ctl->unit - 1;
1583 else 1632 else
1584 end = next_info->offset + next_info->bytes; 1633 end = next_info->offset + next_info->bytes;
1585 1634
@@ -1599,20 +1648,20 @@ again:
1599 } 1648 }
1600 1649
1601 if (info->bytes == bytes) { 1650 if (info->bytes == bytes) {
1602 unlink_free_space(block_group, info); 1651 unlink_free_space(ctl, info);
1603 if (info->bitmap) { 1652 if (info->bitmap) {
1604 kfree(info->bitmap); 1653 kfree(info->bitmap);
1605 block_group->total_bitmaps--; 1654 ctl->total_bitmaps--;
1606 } 1655 }
1607 kmem_cache_free(btrfs_free_space_cachep, info); 1656 kmem_cache_free(btrfs_free_space_cachep, info);
1608 goto out_lock; 1657 goto out_lock;
1609 } 1658 }
1610 1659
1611 if (!info->bitmap && info->offset == offset) { 1660 if (!info->bitmap && info->offset == offset) {
1612 unlink_free_space(block_group, info); 1661 unlink_free_space(ctl, info);
1613 info->offset += bytes; 1662 info->offset += bytes;
1614 info->bytes -= bytes; 1663 info->bytes -= bytes;
1615 link_free_space(block_group, info); 1664 link_free_space(ctl, info);
1616 goto out_lock; 1665 goto out_lock;
1617 } 1666 }
1618 1667
@@ -1626,13 +1675,13 @@ again:
1626 * first unlink the old info and then 1675 * first unlink the old info and then
1627 * insert it again after the hole we're creating 1676 * insert it again after the hole we're creating
1628 */ 1677 */
1629 unlink_free_space(block_group, info); 1678 unlink_free_space(ctl, info);
1630 if (offset + bytes < info->offset + info->bytes) { 1679 if (offset + bytes < info->offset + info->bytes) {
1631 u64 old_end = info->offset + info->bytes; 1680 u64 old_end = info->offset + info->bytes;
1632 1681
1633 info->offset = offset + bytes; 1682 info->offset = offset + bytes;
1634 info->bytes = old_end - info->offset; 1683 info->bytes = old_end - info->offset;
1635 ret = link_free_space(block_group, info); 1684 ret = link_free_space(ctl, info);
1636 WARN_ON(ret); 1685 WARN_ON(ret);
1637 if (ret) 1686 if (ret)
1638 goto out_lock; 1687 goto out_lock;
@@ -1642,7 +1691,7 @@ again:
1642 */ 1691 */
1643 kmem_cache_free(btrfs_free_space_cachep, info); 1692 kmem_cache_free(btrfs_free_space_cachep, info);
1644 } 1693 }
1645 spin_unlock(&block_group->tree_lock); 1694 spin_unlock(&ctl->tree_lock);
1646 1695
1647 /* step two, insert a new info struct to cover 1696 /* step two, insert a new info struct to cover
1648 * anything before the hole 1697 * anything before the hole
@@ -1653,12 +1702,12 @@ again:
1653 goto out; 1702 goto out;
1654 } 1703 }
1655 1704
1656 ret = remove_from_bitmap(block_group, info, &offset, &bytes); 1705 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1657 if (ret == -EAGAIN) 1706 if (ret == -EAGAIN)
1658 goto again; 1707 goto again;
1659 BUG_ON(ret); 1708 BUG_ON(ret);
1660out_lock: 1709out_lock:
1661 spin_unlock(&block_group->tree_lock); 1710 spin_unlock(&ctl->tree_lock);
1662out: 1711out:
1663 return ret; 1712 return ret;
1664} 1713}
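
A hypothetical case of the two-step split described by the comments above (unlink and re-insert the tail, then add a fresh entry for the space before the hole):

/*
 * The cache holds the extent {offset = 100, bytes = 100}, i.e. the
 * range 100..199, and we remove offset = 140, bytes = 20.
 *
 *   step one: the entry is unlinked and reused as the tail,
 *             becoming {offset = 160, bytes = 40};
 *   step two: a fresh entry {offset = 100, bytes = 40} is added back
 *             to cover everything before the hole.
 */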
@@ -1666,11 +1715,12 @@ out:
1666void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, 1715void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1667 u64 bytes) 1716 u64 bytes)
1668{ 1717{
1718 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1669 struct btrfs_free_space *info; 1719 struct btrfs_free_space *info;
1670 struct rb_node *n; 1720 struct rb_node *n;
1671 int count = 0; 1721 int count = 0;
1672 1722
1673 for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) { 1723 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
1674 info = rb_entry(n, struct btrfs_free_space, offset_index); 1724 info = rb_entry(n, struct btrfs_free_space, offset_index);
1675 if (info->bytes >= bytes) 1725 if (info->bytes >= bytes)
1676 count++; 1726 count++;
@@ -1685,19 +1735,28 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1685 "\n", count); 1735 "\n", count);
1686} 1736}
1687 1737
1688u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) 1738static struct btrfs_free_space_op free_space_op = {
1739 .recalc_thresholds = recalculate_thresholds,
1740 .use_bitmap = use_bitmap,
1741};
1742
1743void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
1689{ 1744{
1690 struct btrfs_free_space *info; 1745 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1691 struct rb_node *n;
1692 u64 ret = 0;
1693 1746
1694 for (n = rb_first(&block_group->free_space_offset); n; 1747 spin_lock_init(&ctl->tree_lock);
1695 n = rb_next(n)) { 1748 ctl->unit = block_group->sectorsize;
1696 info = rb_entry(n, struct btrfs_free_space, offset_index); 1749 ctl->start = block_group->key.objectid;
1697 ret += info->bytes; 1750 ctl->private = block_group;
1698 } 1751 ctl->op = &free_space_op;
1699 1752
1700 return ret; 1753 /*
1754 * we only want to have 32k of ram per block group for keeping
1755 * track of free space, and if we pass 1/2 of that we want to
1756 * start converting things over to using bitmaps
1757 */
1758 ctl->extents_thresh = ((1024 * 32) / 2) /
1759 sizeof(struct btrfs_free_space);
1701} 1760}
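
As a rough sanity check on the 32k comment above (an illustration only): on a typical 64-bit build struct btrfs_free_space is about 64 bytes (a 24-byte rb_node, two u64 fields, a pointer and a 16-byte list_head; this layout is an assumption, not something stated in the patch), so the initial threshold comes out at roughly 256 extent entries per block group:

#include <stdio.h>

/* Stand-in size for illustration only; the real value is
 * sizeof(struct btrfs_free_space) in the actual kernel build. */
#define FREE_SPACE_ENTRY_SIZE	64	/* assumed: 24 + 8 + 8 + 8 + 16 */

int main(void)
{
	int extents_thresh = ((1024 * 32) / 2) / FREE_SPACE_ENTRY_SIZE;

	/* 16384 / 64 = 256 extent entries before converting to bitmaps */
	printf("extents_thresh = %d\n", extents_thresh);
	return 0;
}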
1702 1761
1703/* 1762/*
@@ -1711,6 +1770,7 @@ __btrfs_return_cluster_to_free_space(
1711 struct btrfs_block_group_cache *block_group, 1770 struct btrfs_block_group_cache *block_group,
1712 struct btrfs_free_cluster *cluster) 1771 struct btrfs_free_cluster *cluster)
1713{ 1772{
1773 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1714 struct btrfs_free_space *entry; 1774 struct btrfs_free_space *entry;
1715 struct rb_node *node; 1775 struct rb_node *node;
1716 1776
@@ -1732,8 +1792,8 @@ __btrfs_return_cluster_to_free_space(
1732 1792
1733 bitmap = (entry->bitmap != NULL); 1793 bitmap = (entry->bitmap != NULL);
1734 if (!bitmap) 1794 if (!bitmap)
1735 try_merge_free_space(block_group, entry, false); 1795 try_merge_free_space(ctl, entry, false);
1736 tree_insert_offset(&block_group->free_space_offset, 1796 tree_insert_offset(&ctl->free_space_offset,
1737 entry->offset, &entry->offset_index, bitmap); 1797 entry->offset, &entry->offset_index, bitmap);
1738 } 1798 }
1739 cluster->root = RB_ROOT; 1799 cluster->root = RB_ROOT;
@@ -1744,14 +1804,38 @@ out:
1744 return 0; 1804 return 0;
1745} 1805}
1746 1806
1747void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) 1807void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
1748{ 1808{
1749 struct btrfs_free_space *info; 1809 struct btrfs_free_space *info;
1750 struct rb_node *node; 1810 struct rb_node *node;
1811
1812 while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
1813 info = rb_entry(node, struct btrfs_free_space, offset_index);
1814 unlink_free_space(ctl, info);
1815 kfree(info->bitmap);
1816 kmem_cache_free(btrfs_free_space_cachep, info);
1817 if (need_resched()) {
1818 spin_unlock(&ctl->tree_lock);
1819 cond_resched();
1820 spin_lock(&ctl->tree_lock);
1821 }
1822 }
1823}
1824
1825void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
1826{
1827 spin_lock(&ctl->tree_lock);
1828 __btrfs_remove_free_space_cache_locked(ctl);
1829 spin_unlock(&ctl->tree_lock);
1830}
1831
1832void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
1833{
1834 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1751 struct btrfs_free_cluster *cluster; 1835 struct btrfs_free_cluster *cluster;
1752 struct list_head *head; 1836 struct list_head *head;
1753 1837
1754 spin_lock(&block_group->tree_lock); 1838 spin_lock(&ctl->tree_lock);
1755 while ((head = block_group->cluster_list.next) != 1839 while ((head = block_group->cluster_list.next) !=
1756 &block_group->cluster_list) { 1840 &block_group->cluster_list) {
1757 cluster = list_entry(head, struct btrfs_free_cluster, 1841 cluster = list_entry(head, struct btrfs_free_cluster,
@@ -1760,60 +1844,46 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
1760 WARN_ON(cluster->block_group != block_group); 1844 WARN_ON(cluster->block_group != block_group);
1761 __btrfs_return_cluster_to_free_space(block_group, cluster); 1845 __btrfs_return_cluster_to_free_space(block_group, cluster);
1762 if (need_resched()) { 1846 if (need_resched()) {
1763 spin_unlock(&block_group->tree_lock); 1847 spin_unlock(&ctl->tree_lock);
1764 cond_resched(); 1848 cond_resched();
1765 spin_lock(&block_group->tree_lock); 1849 spin_lock(&ctl->tree_lock);
1766 } 1850 }
1767 } 1851 }
1852 __btrfs_remove_free_space_cache_locked(ctl);
1853 spin_unlock(&ctl->tree_lock);
1768 1854
1769 while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
1770 info = rb_entry(node, struct btrfs_free_space, offset_index);
1771 if (!info->bitmap) {
1772 unlink_free_space(block_group, info);
1773 kmem_cache_free(btrfs_free_space_cachep, info);
1774 } else {
1775 free_bitmap(block_group, info);
1776 }
1777
1778 if (need_resched()) {
1779 spin_unlock(&block_group->tree_lock);
1780 cond_resched();
1781 spin_lock(&block_group->tree_lock);
1782 }
1783 }
1784
1785 spin_unlock(&block_group->tree_lock);
1786} 1855}
1787 1856
1788u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, 1857u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
1789 u64 offset, u64 bytes, u64 empty_size) 1858 u64 offset, u64 bytes, u64 empty_size)
1790{ 1859{
1860 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1791 struct btrfs_free_space *entry = NULL; 1861 struct btrfs_free_space *entry = NULL;
1792 u64 bytes_search = bytes + empty_size; 1862 u64 bytes_search = bytes + empty_size;
1793 u64 ret = 0; 1863 u64 ret = 0;
1794 1864
1795 spin_lock(&block_group->tree_lock); 1865 spin_lock(&ctl->tree_lock);
1796 entry = find_free_space(block_group, &offset, &bytes_search, 0); 1866 entry = find_free_space(ctl, &offset, &bytes_search);
1797 if (!entry) 1867 if (!entry)
1798 goto out; 1868 goto out;
1799 1869
1800 ret = offset; 1870 ret = offset;
1801 if (entry->bitmap) { 1871 if (entry->bitmap) {
1802 bitmap_clear_bits(block_group, entry, offset, bytes); 1872 bitmap_clear_bits(ctl, entry, offset, bytes);
1803 if (!entry->bytes) 1873 if (!entry->bytes)
1804 free_bitmap(block_group, entry); 1874 free_bitmap(ctl, entry);
1805 } else { 1875 } else {
1806 unlink_free_space(block_group, entry); 1876 unlink_free_space(ctl, entry);
1807 entry->offset += bytes; 1877 entry->offset += bytes;
1808 entry->bytes -= bytes; 1878 entry->bytes -= bytes;
1809 if (!entry->bytes) 1879 if (!entry->bytes)
1810 kmem_cache_free(btrfs_free_space_cachep, entry); 1880 kmem_cache_free(btrfs_free_space_cachep, entry);
1811 else 1881 else
1812 link_free_space(block_group, entry); 1882 link_free_space(ctl, entry);
1813 } 1883 }
1814 1884
1815out: 1885out:
1816 spin_unlock(&block_group->tree_lock); 1886 spin_unlock(&ctl->tree_lock);
1817 1887
1818 return ret; 1888 return ret;
1819} 1889}
@@ -1830,6 +1900,7 @@ int btrfs_return_cluster_to_free_space(
1830 struct btrfs_block_group_cache *block_group, 1900 struct btrfs_block_group_cache *block_group,
1831 struct btrfs_free_cluster *cluster) 1901 struct btrfs_free_cluster *cluster)
1832{ 1902{
1903 struct btrfs_free_space_ctl *ctl;
1833 int ret; 1904 int ret;
1834 1905
1835 /* first, get a safe pointer to the block group */ 1906 /* first, get a safe pointer to the block group */
@@ -1848,10 +1919,12 @@ int btrfs_return_cluster_to_free_space(
1848 atomic_inc(&block_group->count); 1919 atomic_inc(&block_group->count);
1849 spin_unlock(&cluster->lock); 1920 spin_unlock(&cluster->lock);
1850 1921
1922 ctl = block_group->free_space_ctl;
1923
1851 /* now return any extents the cluster had on it */ 1924 /* now return any extents the cluster had on it */
1852 spin_lock(&block_group->tree_lock); 1925 spin_lock(&ctl->tree_lock);
1853 ret = __btrfs_return_cluster_to_free_space(block_group, cluster); 1926 ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
1854 spin_unlock(&block_group->tree_lock); 1927 spin_unlock(&ctl->tree_lock);
1855 1928
1856 /* finally drop our ref */ 1929 /* finally drop our ref */
1857 btrfs_put_block_group(block_group); 1930 btrfs_put_block_group(block_group);
@@ -1863,6 +1936,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
1863 struct btrfs_free_space *entry, 1936 struct btrfs_free_space *entry,
1864 u64 bytes, u64 min_start) 1937 u64 bytes, u64 min_start)
1865{ 1938{
1939 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1866 int err; 1940 int err;
1867 u64 search_start = cluster->window_start; 1941 u64 search_start = cluster->window_start;
1868 u64 search_bytes = bytes; 1942 u64 search_bytes = bytes;
@@ -1871,13 +1945,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
1871 search_start = min_start; 1945 search_start = min_start;
1872 search_bytes = bytes; 1946 search_bytes = bytes;
1873 1947
1874 err = search_bitmap(block_group, entry, &search_start, 1948 err = search_bitmap(ctl, entry, &search_start, &search_bytes);
1875 &search_bytes);
1876 if (err) 1949 if (err)
1877 return 0; 1950 return 0;
1878 1951
1879 ret = search_start; 1952 ret = search_start;
1880 bitmap_clear_bits(block_group, entry, ret, bytes); 1953 bitmap_clear_bits(ctl, entry, ret, bytes);
1881 1954
1882 return ret; 1955 return ret;
1883} 1956}
@@ -1891,6 +1964,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
1891 struct btrfs_free_cluster *cluster, u64 bytes, 1964 struct btrfs_free_cluster *cluster, u64 bytes,
1892 u64 min_start) 1965 u64 min_start)
1893{ 1966{
1967 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1894 struct btrfs_free_space *entry = NULL; 1968 struct btrfs_free_space *entry = NULL;
1895 struct rb_node *node; 1969 struct rb_node *node;
1896 u64 ret = 0; 1970 u64 ret = 0;
@@ -1910,8 +1984,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
1910 while(1) { 1984 while(1) {
1911 if (entry->bytes < bytes || 1985 if (entry->bytes < bytes ||
1912 (!entry->bitmap && entry->offset < min_start)) { 1986 (!entry->bitmap && entry->offset < min_start)) {
1913 struct rb_node *node;
1914
1915 node = rb_next(&entry->offset_index); 1987 node = rb_next(&entry->offset_index);
1916 if (!node) 1988 if (!node)
1917 break; 1989 break;
@@ -1925,7 +1997,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
1925 cluster, entry, bytes, 1997 cluster, entry, bytes,
1926 min_start); 1998 min_start);
1927 if (ret == 0) { 1999 if (ret == 0) {
1928 struct rb_node *node;
1929 node = rb_next(&entry->offset_index); 2000 node = rb_next(&entry->offset_index);
1930 if (!node) 2001 if (!node)
1931 break; 2002 break;
@@ -1951,20 +2022,20 @@ out:
1951 if (!ret) 2022 if (!ret)
1952 return 0; 2023 return 0;
1953 2024
1954 spin_lock(&block_group->tree_lock); 2025 spin_lock(&ctl->tree_lock);
1955 2026
1956 block_group->free_space -= bytes; 2027 ctl->free_space -= bytes;
1957 if (entry->bytes == 0) { 2028 if (entry->bytes == 0) {
1958 block_group->free_extents--; 2029 ctl->free_extents--;
1959 if (entry->bitmap) { 2030 if (entry->bitmap) {
1960 kfree(entry->bitmap); 2031 kfree(entry->bitmap);
1961 block_group->total_bitmaps--; 2032 ctl->total_bitmaps--;
1962 recalculate_thresholds(block_group); 2033 ctl->op->recalc_thresholds(ctl);
1963 } 2034 }
1964 kmem_cache_free(btrfs_free_space_cachep, entry); 2035 kmem_cache_free(btrfs_free_space_cachep, entry);
1965 } 2036 }
1966 2037
1967 spin_unlock(&block_group->tree_lock); 2038 spin_unlock(&ctl->tree_lock);
1968 2039
1969 return ret; 2040 return ret;
1970} 2041}
@@ -1974,6 +2045,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
1974 struct btrfs_free_cluster *cluster, 2045 struct btrfs_free_cluster *cluster,
1975 u64 offset, u64 bytes, u64 min_bytes) 2046 u64 offset, u64 bytes, u64 min_bytes)
1976{ 2047{
2048 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1977 unsigned long next_zero; 2049 unsigned long next_zero;
1978 unsigned long i; 2050 unsigned long i;
1979 unsigned long search_bits; 2051 unsigned long search_bits;
@@ -2028,7 +2100,7 @@ again:
2028 2100
2029 cluster->window_start = start * block_group->sectorsize + 2101 cluster->window_start = start * block_group->sectorsize +
2030 entry->offset; 2102 entry->offset;
2031 rb_erase(&entry->offset_index, &block_group->free_space_offset); 2103 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2032 ret = tree_insert_offset(&cluster->root, entry->offset, 2104 ret = tree_insert_offset(&cluster->root, entry->offset,
2033 &entry->offset_index, 1); 2105 &entry->offset_index, 1);
2034 BUG_ON(ret); 2106 BUG_ON(ret);
@@ -2043,6 +2115,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2043 struct btrfs_free_cluster *cluster, 2115 struct btrfs_free_cluster *cluster,
2044 u64 offset, u64 bytes, u64 min_bytes) 2116 u64 offset, u64 bytes, u64 min_bytes)
2045{ 2117{
2118 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2046 struct btrfs_free_space *first = NULL; 2119 struct btrfs_free_space *first = NULL;
2047 struct btrfs_free_space *entry = NULL; 2120 struct btrfs_free_space *entry = NULL;
2048 struct btrfs_free_space *prev = NULL; 2121 struct btrfs_free_space *prev = NULL;
@@ -2053,7 +2126,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2053 u64 max_extent; 2126 u64 max_extent;
2054 u64 max_gap = 128 * 1024; 2127 u64 max_gap = 128 * 1024;
2055 2128
2056 entry = tree_search_offset(block_group, offset, 0, 1); 2129 entry = tree_search_offset(ctl, offset, 0, 1);
2057 if (!entry) 2130 if (!entry)
2058 return -ENOSPC; 2131 return -ENOSPC;
2059 2132
@@ -2119,7 +2192,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2119 if (entry->bitmap) 2192 if (entry->bitmap)
2120 continue; 2193 continue;
2121 2194
2122 rb_erase(&entry->offset_index, &block_group->free_space_offset); 2195 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2123 ret = tree_insert_offset(&cluster->root, entry->offset, 2196 ret = tree_insert_offset(&cluster->root, entry->offset,
2124 &entry->offset_index, 0); 2197 &entry->offset_index, 0);
2125 BUG_ON(ret); 2198 BUG_ON(ret);
@@ -2138,16 +2211,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2138 struct btrfs_free_cluster *cluster, 2211 struct btrfs_free_cluster *cluster,
2139 u64 offset, u64 bytes, u64 min_bytes) 2212 u64 offset, u64 bytes, u64 min_bytes)
2140{ 2213{
2214 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2141 struct btrfs_free_space *entry; 2215 struct btrfs_free_space *entry;
2142 struct rb_node *node; 2216 struct rb_node *node;
2143 int ret = -ENOSPC; 2217 int ret = -ENOSPC;
2144 2218
2145 if (block_group->total_bitmaps == 0) 2219 if (ctl->total_bitmaps == 0)
2146 return -ENOSPC; 2220 return -ENOSPC;
2147 2221
2148 entry = tree_search_offset(block_group, 2222 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
2149 offset_to_bitmap(block_group, offset),
2150 0, 1);
2151 if (!entry) 2223 if (!entry)
2152 return -ENOSPC; 2224 return -ENOSPC;
2153 2225
@@ -2180,6 +2252,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2180 struct btrfs_free_cluster *cluster, 2252 struct btrfs_free_cluster *cluster,
2181 u64 offset, u64 bytes, u64 empty_size) 2253 u64 offset, u64 bytes, u64 empty_size)
2182{ 2254{
2255 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2183 u64 min_bytes; 2256 u64 min_bytes;
2184 int ret; 2257 int ret;
2185 2258
@@ -2199,14 +2272,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2199 } else 2272 } else
2200 min_bytes = max(bytes, (bytes + empty_size) >> 2); 2273 min_bytes = max(bytes, (bytes + empty_size) >> 2);
2201 2274
2202 spin_lock(&block_group->tree_lock); 2275 spin_lock(&ctl->tree_lock);
2203 2276
2204 /* 2277 /*
2205 * If we know we don't have enough space to make a cluster don't even 2278 * If we know we don't have enough space to make a cluster don't even
2206 * bother doing all the work to try and find one. 2279 * bother doing all the work to try and find one.
2207 */ 2280 */
2208 if (block_group->free_space < min_bytes) { 2281 if (ctl->free_space < min_bytes) {
2209 spin_unlock(&block_group->tree_lock); 2282 spin_unlock(&ctl->tree_lock);
2210 return -ENOSPC; 2283 return -ENOSPC;
2211 } 2284 }
2212 2285
@@ -2232,7 +2305,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2232 } 2305 }
2233out: 2306out:
2234 spin_unlock(&cluster->lock); 2307 spin_unlock(&cluster->lock);
2235 spin_unlock(&block_group->tree_lock); 2308 spin_unlock(&ctl->tree_lock);
2236 2309
2237 return ret; 2310 return ret;
2238} 2311}
@@ -2253,6 +2326,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2253int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, 2326int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2254 u64 *trimmed, u64 start, u64 end, u64 minlen) 2327 u64 *trimmed, u64 start, u64 end, u64 minlen)
2255{ 2328{
2329 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2256 struct btrfs_free_space *entry = NULL; 2330 struct btrfs_free_space *entry = NULL;
2257 struct btrfs_fs_info *fs_info = block_group->fs_info; 2331 struct btrfs_fs_info *fs_info = block_group->fs_info;
2258 u64 bytes = 0; 2332 u64 bytes = 0;
@@ -2262,52 +2336,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2262 *trimmed = 0; 2336 *trimmed = 0;
2263 2337
2264 while (start < end) { 2338 while (start < end) {
2265 spin_lock(&block_group->tree_lock); 2339 spin_lock(&ctl->tree_lock);
2266 2340
2267 if (block_group->free_space < minlen) { 2341 if (ctl->free_space < minlen) {
2268 spin_unlock(&block_group->tree_lock); 2342 spin_unlock(&ctl->tree_lock);
2269 break; 2343 break;
2270 } 2344 }
2271 2345
2272 entry = tree_search_offset(block_group, start, 0, 1); 2346 entry = tree_search_offset(ctl, start, 0, 1);
2273 if (!entry) 2347 if (!entry)
2274 entry = tree_search_offset(block_group, 2348 entry = tree_search_offset(ctl,
2275 offset_to_bitmap(block_group, 2349 offset_to_bitmap(ctl, start),
2276 start),
2277 1, 1); 2350 1, 1);
2278 2351
2279 if (!entry || entry->offset >= end) { 2352 if (!entry || entry->offset >= end) {
2280 spin_unlock(&block_group->tree_lock); 2353 spin_unlock(&ctl->tree_lock);
2281 break; 2354 break;
2282 } 2355 }
2283 2356
2284 if (entry->bitmap) { 2357 if (entry->bitmap) {
2285 ret = search_bitmap(block_group, entry, &start, &bytes); 2358 ret = search_bitmap(ctl, entry, &start, &bytes);
2286 if (!ret) { 2359 if (!ret) {
2287 if (start >= end) { 2360 if (start >= end) {
2288 spin_unlock(&block_group->tree_lock); 2361 spin_unlock(&ctl->tree_lock);
2289 break; 2362 break;
2290 } 2363 }
2291 bytes = min(bytes, end - start); 2364 bytes = min(bytes, end - start);
2292 bitmap_clear_bits(block_group, entry, 2365 bitmap_clear_bits(ctl, entry, start, bytes);
2293 start, bytes);
2294 if (entry->bytes == 0) 2366 if (entry->bytes == 0)
2295 free_bitmap(block_group, entry); 2367 free_bitmap(ctl, entry);
2296 } else { 2368 } else {
2297 start = entry->offset + BITS_PER_BITMAP * 2369 start = entry->offset + BITS_PER_BITMAP *
2298 block_group->sectorsize; 2370 block_group->sectorsize;
2299 spin_unlock(&block_group->tree_lock); 2371 spin_unlock(&ctl->tree_lock);
2300 ret = 0; 2372 ret = 0;
2301 continue; 2373 continue;
2302 } 2374 }
2303 } else { 2375 } else {
2304 start = entry->offset; 2376 start = entry->offset;
2305 bytes = min(entry->bytes, end - start); 2377 bytes = min(entry->bytes, end - start);
2306 unlink_free_space(block_group, entry); 2378 unlink_free_space(ctl, entry);
2307 kmem_cache_free(btrfs_free_space_cachep, entry); 2379 kmem_cache_free(btrfs_free_space_cachep, entry);
2308 } 2380 }
2309 2381
2310 spin_unlock(&block_group->tree_lock); 2382 spin_unlock(&ctl->tree_lock);
2311 2383
2312 if (bytes >= minlen) { 2384 if (bytes >= minlen) {
2313 int update_ret; 2385 int update_ret;
@@ -2319,8 +2391,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2319 bytes, 2391 bytes,
2320 &actually_trimmed); 2392 &actually_trimmed);
2321 2393
2322 btrfs_add_free_space(block_group, 2394 btrfs_add_free_space(block_group, start, bytes);
2323 start, bytes);
2324 if (!update_ret) 2395 if (!update_ret)
2325 btrfs_update_reserved_bytes(block_group, 2396 btrfs_update_reserved_bytes(block_group,
2326 bytes, 0, 1); 2397 bytes, 0, 1);
@@ -2342,3 +2413,145 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2342 2413
2343 return ret; 2414 return ret;
2344} 2415}
2416
2417/*
2418 * Find the left-most item in the cache tree, and then return the
2419 * smallest inode number in the item.
2420 *
2421 * Note: the returned inode number may not be the smallest one in
 2422 * the tree if the left-most item is a bitmap.
2423 */
2424u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2425{
2426 struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
2427 struct btrfs_free_space *entry = NULL;
2428 u64 ino = 0;
2429
2430 spin_lock(&ctl->tree_lock);
2431
2432 if (RB_EMPTY_ROOT(&ctl->free_space_offset))
2433 goto out;
2434
2435 entry = rb_entry(rb_first(&ctl->free_space_offset),
2436 struct btrfs_free_space, offset_index);
2437
2438 if (!entry->bitmap) {
2439 ino = entry->offset;
2440
2441 unlink_free_space(ctl, entry);
2442 entry->offset++;
2443 entry->bytes--;
2444 if (!entry->bytes)
2445 kmem_cache_free(btrfs_free_space_cachep, entry);
2446 else
2447 link_free_space(ctl, entry);
2448 } else {
2449 u64 offset = 0;
2450 u64 count = 1;
2451 int ret;
2452
2453 ret = search_bitmap(ctl, entry, &offset, &count);
2454 BUG_ON(ret);
2455
2456 ino = offset;
2457 bitmap_clear_bits(ctl, entry, offset, 1);
2458 if (entry->bytes == 0)
2459 free_bitmap(ctl, entry);
2460 }
2461out:
2462 spin_unlock(&ctl->tree_lock);
2463
2464 return ino;
2465}
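
Two hypothetical cache states make the note above concrete:

/*
 * Left-most entry is the extent {offset = 300, bytes = 5}:
 *   -> 300 is returned and the entry shrinks to {offset = 301, bytes = 4}.
 *
 * Left-most entry is a bitmap at offset 0 whose only set bit is 5000,
 * and the next entry is the extent {offset = 300, bytes = 10}:
 *   -> 5000 is returned, even though 300 is free and smaller; entries
 *      are ordered by their starting offset, not by their smallest
 *      free number, which is what the note above warns about.
 */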
2466
2467struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2468 struct btrfs_path *path)
2469{
2470 struct inode *inode = NULL;
2471
2472 spin_lock(&root->cache_lock);
2473 if (root->cache_inode)
2474 inode = igrab(root->cache_inode);
2475 spin_unlock(&root->cache_lock);
2476 if (inode)
2477 return inode;
2478
2479 inode = __lookup_free_space_inode(root, path, 0);
2480 if (IS_ERR(inode))
2481 return inode;
2482
2483 spin_lock(&root->cache_lock);
2484 if (!root->fs_info->closing)
2485 root->cache_inode = igrab(inode);
2486 spin_unlock(&root->cache_lock);
2487
2488 return inode;
2489}
2490
2491int create_free_ino_inode(struct btrfs_root *root,
2492 struct btrfs_trans_handle *trans,
2493 struct btrfs_path *path)
2494{
2495 return __create_free_space_inode(root, trans, path,
2496 BTRFS_FREE_INO_OBJECTID, 0);
2497}
2498
2499int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2500{
2501 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2502 struct btrfs_path *path;
2503 struct inode *inode;
2504 int ret = 0;
2505 u64 root_gen = btrfs_root_generation(&root->root_item);
2506
2507 /*
2508 * If we're unmounting then just return, since this does a search on the
2509 * normal root and not the commit root and we could deadlock.
2510 */
2511 smp_mb();
2512 if (fs_info->closing)
2513 return 0;
2514
2515 path = btrfs_alloc_path();
2516 if (!path)
2517 return 0;
2518
2519 inode = lookup_free_ino_inode(root, path);
2520 if (IS_ERR(inode))
2521 goto out;
2522
2523 if (root_gen != BTRFS_I(inode)->generation)
2524 goto out_put;
2525
2526 ret = __load_free_space_cache(root, inode, ctl, path, 0);
2527
2528 if (ret < 0)
2529 printk(KERN_ERR "btrfs: failed to load free ino cache for "
2530 "root %llu\n", root->root_key.objectid);
2531out_put:
2532 iput(inode);
2533out:
2534 btrfs_free_path(path);
2535 return ret;
2536}
2537
2538int btrfs_write_out_ino_cache(struct btrfs_root *root,
2539 struct btrfs_trans_handle *trans,
2540 struct btrfs_path *path)
2541{
2542 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2543 struct inode *inode;
2544 int ret;
2545
2546 inode = lookup_free_ino_inode(root, path);
2547 if (IS_ERR(inode))
2548 return 0;
2549
2550 ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
2551 if (ret < 0)
2552 printk(KERN_ERR "btrfs: failed to write free ino cache "
2553 "for root %llu\n", root->root_key.objectid);
2554
2555 iput(inode);
2556 return ret;
2557}
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 65c3b935289f..8f2613f779ed 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -27,6 +27,25 @@ struct btrfs_free_space {
27 struct list_head list; 27 struct list_head list;
28}; 28};
29 29
30struct btrfs_free_space_ctl {
31 spinlock_t tree_lock;
32 struct rb_root free_space_offset;
33 u64 free_space;
34 int extents_thresh;
35 int free_extents;
36 int total_bitmaps;
37 int unit;
38 u64 start;
39 struct btrfs_free_space_op *op;
40 void *private;
41};
42
43struct btrfs_free_space_op {
44 void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl);
45 bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
46 struct btrfs_free_space *info);
47};
48
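
These two structs are what makes the rewritten cache generic: a user embeds a btrfs_free_space_ctl and supplies its policy through btrfs_free_space_op. Purely as an illustration (this op is not part of the patch), a policy that never converts extents to bitmaps would look like the sketch below; it mirrors the shape of the pinned free-ino policy added further down in this diff:

/* Illustrative only: a policy that keeps everything as plain extents. */
static void noop_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
	/* nothing to adapt; the threshold stays wherever it was set */
}

static bool never_use_bitmap(struct btrfs_free_space_ctl *ctl,
			     struct btrfs_free_space *info)
{
	return false;
}

static struct btrfs_free_space_op example_extents_only_op = {
	.recalc_thresholds	= noop_recalc_thresholds,
	.use_bitmap		= never_use_bitmap,
};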
30struct inode *lookup_free_space_inode(struct btrfs_root *root, 49struct inode *lookup_free_space_inode(struct btrfs_root *root,
31 struct btrfs_block_group_cache 50 struct btrfs_block_group_cache
32 *block_group, struct btrfs_path *path); 51 *block_group, struct btrfs_path *path);
@@ -45,17 +64,38 @@ int btrfs_write_out_cache(struct btrfs_root *root,
45 struct btrfs_trans_handle *trans, 64 struct btrfs_trans_handle *trans,
46 struct btrfs_block_group_cache *block_group, 65 struct btrfs_block_group_cache *block_group,
47 struct btrfs_path *path); 66 struct btrfs_path *path);
48int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, 67
49 u64 bytenr, u64 size); 68struct inode *lookup_free_ino_inode(struct btrfs_root *root,
69 struct btrfs_path *path);
70int create_free_ino_inode(struct btrfs_root *root,
71 struct btrfs_trans_handle *trans,
72 struct btrfs_path *path);
73int load_free_ino_cache(struct btrfs_fs_info *fs_info,
74 struct btrfs_root *root);
75int btrfs_write_out_ino_cache(struct btrfs_root *root,
76 struct btrfs_trans_handle *trans,
77 struct btrfs_path *path);
78
79void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
80int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
81 u64 bytenr, u64 size);
82static inline int
83btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
84 u64 bytenr, u64 size)
85{
86 return __btrfs_add_free_space(block_group->free_space_ctl,
87 bytenr, size);
88}
50int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, 89int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
51 u64 bytenr, u64 size); 90 u64 bytenr, u64 size);
91void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
52void btrfs_remove_free_space_cache(struct btrfs_block_group_cache 92void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
53 *block_group); 93 *block_group);
54u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, 94u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
55 u64 offset, u64 bytes, u64 empty_size); 95 u64 offset, u64 bytes, u64 empty_size);
96u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
56void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, 97void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
57 u64 bytes); 98 u64 bytes);
58u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
59int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, 99int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
60 struct btrfs_root *root, 100 struct btrfs_root *root,
61 struct btrfs_block_group_cache *block_group, 101 struct btrfs_block_group_cache *block_group,
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 64f1150bb48d..baa74f3db691 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -130,7 +130,6 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
130 item_size - (ptr + sub_item_len - item_start)); 130 item_size - (ptr + sub_item_len - item_start));
131 ret = btrfs_truncate_item(trans, root, path, 131 ret = btrfs_truncate_item(trans, root, path,
132 item_size - sub_item_len, 1); 132 item_size - sub_item_len, 1);
133 BUG_ON(ret);
134out: 133out:
135 btrfs_free_path(path); 134 btrfs_free_path(path);
136 return ret; 135 return ret;
@@ -167,7 +166,6 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
167 166
168 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 167 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
169 ret = btrfs_extend_item(trans, root, path, ins_len); 168 ret = btrfs_extend_item(trans, root, path, ins_len);
170 BUG_ON(ret);
171 ref = btrfs_item_ptr(path->nodes[0], path->slots[0], 169 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
172 struct btrfs_inode_ref); 170 struct btrfs_inode_ref);
173 ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size); 171 ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index c05a08f4c411..3262cd17a12f 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -16,11 +16,446 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/delay.h>
20#include <linux/kthread.h>
21#include <linux/pagemap.h>
22
19#include "ctree.h" 23#include "ctree.h"
20#include "disk-io.h" 24#include "disk-io.h"
25#include "free-space-cache.h"
26#include "inode-map.h"
21#include "transaction.h" 27#include "transaction.h"
22 28
23int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid) 29static int caching_kthread(void *data)
30{
31 struct btrfs_root *root = data;
32 struct btrfs_fs_info *fs_info = root->fs_info;
33 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
34 struct btrfs_key key;
35 struct btrfs_path *path;
36 struct extent_buffer *leaf;
37 u64 last = (u64)-1;
38 int slot;
39 int ret;
40
41 path = btrfs_alloc_path();
42 if (!path)
43 return -ENOMEM;
44
45 /* Since the commit root is read-only, we can safely skip locking. */
46 path->skip_locking = 1;
47 path->search_commit_root = 1;
48 path->reada = 2;
49
50 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
51 key.offset = 0;
52 key.type = BTRFS_INODE_ITEM_KEY;
53again:
54 /* need to make sure the commit_root doesn't disappear */
55 mutex_lock(&root->fs_commit_mutex);
56
57 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
58 if (ret < 0)
59 goto out;
60
61 while (1) {
62 smp_mb();
63 if (fs_info->closing)
64 goto out;
65
66 leaf = path->nodes[0];
67 slot = path->slots[0];
68 if (slot >= btrfs_header_nritems(leaf)) {
69 ret = btrfs_next_leaf(root, path);
70 if (ret < 0)
71 goto out;
72 else if (ret > 0)
73 break;
74
75 if (need_resched() ||
76 btrfs_transaction_in_commit(fs_info)) {
77 leaf = path->nodes[0];
78
79 if (btrfs_header_nritems(leaf) == 0) {
80 WARN_ON(1);
81 break;
82 }
83
84 /*
 85 * Save the key so we can advance forward
86 * in the next search.
87 */
88 btrfs_item_key_to_cpu(leaf, &key, 0);
89 btrfs_release_path(path);
90 root->cache_progress = last;
91 mutex_unlock(&root->fs_commit_mutex);
92 schedule_timeout(1);
93 goto again;
94 } else
95 continue;
96 }
97
98 btrfs_item_key_to_cpu(leaf, &key, slot);
99
100 if (key.type != BTRFS_INODE_ITEM_KEY)
101 goto next;
102
103 if (key.objectid >= root->highest_objectid)
104 break;
105
106 if (last != (u64)-1 && last + 1 != key.objectid) {
107 __btrfs_add_free_space(ctl, last + 1,
108 key.objectid - last - 1);
109 wake_up(&root->cache_wait);
110 }
111
112 last = key.objectid;
113next:
114 path->slots[0]++;
115 }
116
117 if (last < root->highest_objectid - 1) {
118 __btrfs_add_free_space(ctl, last + 1,
119 root->highest_objectid - last - 1);
120 }
121
122 spin_lock(&root->cache_lock);
123 root->cached = BTRFS_CACHE_FINISHED;
124 spin_unlock(&root->cache_lock);
125
126 root->cache_progress = (u64)-1;
127 btrfs_unpin_free_ino(root);
128out:
129 wake_up(&root->cache_wait);
130 mutex_unlock(&root->fs_commit_mutex);
131
132 btrfs_free_path(path);
133
134 return ret;
135}
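
A worked, hypothetical pass of the scan above:

/*
 * The commit root contains inode items 257, 258 and 262, and
 * root->highest_objectid is 266.
 *
 *   257 -> first item, last = 257
 *   258 -> contiguous, last = 258
 *   262 -> gap: __btrfs_add_free_space(ctl, 259, 262 - 258 - 1 = 3),
 *          i.e. inode numbers 259..261 become free, last = 262
 *
 * After the loop, last (262) < highest_objectid - 1 (265), so the tail
 * gap is added too: __btrfs_add_free_space(ctl, 263, 266 - 262 - 1 = 3),
 * covering 263..265.
 */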
136
137static void start_caching(struct btrfs_root *root)
138{
139 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
140 struct task_struct *tsk;
141 int ret;
142 u64 objectid;
143
144 spin_lock(&root->cache_lock);
145 if (root->cached != BTRFS_CACHE_NO) {
146 spin_unlock(&root->cache_lock);
147 return;
148 }
149
150 root->cached = BTRFS_CACHE_STARTED;
151 spin_unlock(&root->cache_lock);
152
153 ret = load_free_ino_cache(root->fs_info, root);
154 if (ret == 1) {
155 spin_lock(&root->cache_lock);
156 root->cached = BTRFS_CACHE_FINISHED;
157 spin_unlock(&root->cache_lock);
158 return;
159 }
160
161 /*
162 * It can be quite time-consuming to fill the cache by searching
 163 * through the extent tree, and it can keep the ino allocation path
 164 * waiting. Therefore, at the start, we quickly find out the highest
165 * inode number and we know we can use inode numbers which fall in
166 * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
167 */
168 ret = btrfs_find_free_objectid(root, &objectid);
169 if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
170 __btrfs_add_free_space(ctl, objectid,
171 BTRFS_LAST_FREE_OBJECTID - objectid + 1);
172 }
173
174 tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
175 root->root_key.objectid);
176 BUG_ON(IS_ERR(tsk));
177}
178
179int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
180{
181again:
182 *objectid = btrfs_find_ino_for_alloc(root);
183
184 if (*objectid != 0)
185 return 0;
186
187 start_caching(root);
188
189 wait_event(root->cache_wait,
190 root->cached == BTRFS_CACHE_FINISHED ||
191 root->free_ino_ctl->free_space > 0);
192
193 if (root->cached == BTRFS_CACHE_FINISHED &&
194 root->free_ino_ctl->free_space == 0)
195 return -ENOSPC;
196 else
197 goto again;
198}
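
A hypothetical caller of the allocator above might look like this. It is a sketch of the calling convention only; example_create_inode() stands in for whatever inode-creation path the real callers use, and the failure handling is illustrative rather than lifted from this patch:

/* hypothetical helper standing in for the real inode-creation path */
static int example_create_inode(struct btrfs_root *root, u64 objectid);

static int example_alloc_ino(struct btrfs_root *root)
{
	u64 objectid;
	int ret;

	ret = btrfs_find_free_ino(root, &objectid);
	if (ret)
		return ret;	/* -ENOSPC: the whole ino space is used up */

	ret = example_create_inode(root, objectid);
	if (ret)
		/* creation failed: give the number back for reuse */
		btrfs_return_ino(root, objectid);

	return ret;
}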
199
200void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
201{
202 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
203 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
204again:
205 if (root->cached == BTRFS_CACHE_FINISHED) {
206 __btrfs_add_free_space(ctl, objectid, 1);
207 } else {
208 /*
209 * If we are in the process of caching free ino chunks,
210 * to avoid adding the same inode number to the free_ino
 211 * tree twice across a transaction boundary, we'll leave it
212 * in the pinned tree until a transaction is committed
213 * or the caching work is done.
214 */
215
216 mutex_lock(&root->fs_commit_mutex);
217 spin_lock(&root->cache_lock);
218 if (root->cached == BTRFS_CACHE_FINISHED) {
219 spin_unlock(&root->cache_lock);
220 mutex_unlock(&root->fs_commit_mutex);
221 goto again;
222 }
223 spin_unlock(&root->cache_lock);
224
225 start_caching(root);
226
227 if (objectid <= root->cache_progress ||
228 objectid > root->highest_objectid)
229 __btrfs_add_free_space(ctl, objectid, 1);
230 else
231 __btrfs_add_free_space(pinned, objectid, 1);
232
233 mutex_unlock(&root->fs_commit_mutex);
234 }
235}
236
237/*
238 * When a transaction is committed, we'll move those inode numbers which
 239 * are smaller than root->cache_progress from the pinned tree to the
 240 * free_ino tree, and the others will just be dropped, because the commit
 241 * root we were searching has changed.
242 *
243 * Must be called with root->fs_commit_mutex held
244 */
245void btrfs_unpin_free_ino(struct btrfs_root *root)
246{
247 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
248 struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
249 struct btrfs_free_space *info;
250 struct rb_node *n;
251 u64 count;
252
253 while (1) {
254 n = rb_first(rbroot);
255 if (!n)
256 break;
257
258 info = rb_entry(n, struct btrfs_free_space, offset_index);
259 BUG_ON(info->bitmap);
260
261 if (info->offset > root->cache_progress)
262 goto free;
263 else if (info->offset + info->bytes > root->cache_progress)
264 count = root->cache_progress - info->offset + 1;
265 else
266 count = info->bytes;
267
268 __btrfs_add_free_space(ctl, info->offset, count);
269free:
270 rb_erase(&info->offset_index, rbroot);
271 kfree(info);
272 }
273}
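
A small worked example of the three cases handled above, with hypothetical numbers and root->cache_progress == 500 at commit time:

/*
 * Pinned entries with root->cache_progress == 500:
 *
 *   {offset = 300, bytes = 10}  -> entirely below the progress point:
 *                                  all 10 numbers move to free_ino.
 *   {offset = 495, bytes = 20}  -> straddles it: count = 500 - 495 + 1 = 6,
 *                                  so 495..500 move; the remaining 14
 *                                  numbers (501..514) are dropped, as the
 *                                  comment above explains.
 *   {offset = 600, bytes = 5}   -> entirely above it: dropped.
 */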
274
275#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
276#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
277
278/*
 279 * The goal is to keep the memory used by the free_ino tree from
 280 * exceeding the memory we would use if we used bitmaps only.
281 */
282static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
283{
284 struct btrfs_free_space *info;
285 struct rb_node *n;
286 int max_ino;
287 int max_bitmaps;
288
289 n = rb_last(&ctl->free_space_offset);
290 if (!n) {
291 ctl->extents_thresh = INIT_THRESHOLD;
292 return;
293 }
294 info = rb_entry(n, struct btrfs_free_space, offset_index);
295
296 /*
297 * Find the maximum inode number in the filesystem. Note we
298 * ignore the fact that this can be a bitmap, because we are
 300 * not doing a precise calculation.
300 */
301 max_ino = info->bytes - 1;
302
303 max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
304 if (max_bitmaps <= ctl->total_bitmaps) {
305 ctl->extents_thresh = 0;
306 return;
307 }
308
309 ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
310 PAGE_CACHE_SIZE / sizeof(*info);
311}
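
Putting hypothetical numbers on the recalculation above, and assuming 4 KiB pages and a 64-byte struct btrfs_free_space (both assumptions, since PAGE_CACHE_SIZE and the structure size depend on the build): one bitmap covers 32768 inode numbers, so a highest inode number of around one million needs at most 31 bitmaps, and every bitmap slot not yet in use is traded for one page worth of extent entries:

#include <stdio.h>

#define PAGE_SIZE_ASSUMED	4096			/* assumption: 4 KiB pages */
#define INODES_PER_BITMAP	(PAGE_SIZE_ASSUMED * 8)	/* 32768 inode numbers */
#define ENTRY_SIZE_ASSUMED	64			/* assumed sizeof(struct btrfs_free_space) */

int main(void)
{
	unsigned long max_ino = 1000000;	/* hypothetical highest inode number */
	unsigned long total_bitmaps = 4;	/* hypothetical bitmaps already in use */
	unsigned long max_bitmaps =
		(max_ino + INODES_PER_BITMAP - 1) / INODES_PER_BITMAP;
	unsigned long extents_thresh;

	if (max_bitmaps <= total_bitmaps)
		extents_thresh = 0;
	else
		extents_thresh = (max_bitmaps - total_bitmaps) *
				 PAGE_SIZE_ASSUMED / ENTRY_SIZE_ASSUMED;

	/* 1000000 / 32768 -> 31 bitmaps; (31 - 4) * 4096 / 64 = 1728 extents */
	printf("max_bitmaps = %lu, extents_thresh = %lu\n",
	       max_bitmaps, extents_thresh);
	return 0;
}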
312
313/*
 314 * We don't fall back to a bitmap if we are below the extents threshold
315 * or this chunk of inode numbers is a big one.
316 */
317static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
318 struct btrfs_free_space *info)
319{
320 if (ctl->free_extents < ctl->extents_thresh ||
321 info->bytes > INODES_PER_BITMAP / 10)
322 return false;
323
324 return true;
325}
326
327static struct btrfs_free_space_op free_ino_op = {
328 .recalc_thresholds = recalculate_thresholds,
329 .use_bitmap = use_bitmap,
330};
331
332static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
333{
334}
335
336static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
337 struct btrfs_free_space *info)
338{
339 /*
340 * We always use extents for two reasons:
341 *
 342 * - The pinned tree is only used while the caching work is
 343 * running.
344 * - Make code simpler. See btrfs_unpin_free_ino().
345 */
346 return false;
347}
348
349static struct btrfs_free_space_op pinned_free_ino_op = {
350 .recalc_thresholds = pinned_recalc_thresholds,
351 .use_bitmap = pinned_use_bitmap,
352};
353
354void btrfs_init_free_ino_ctl(struct btrfs_root *root)
355{
356 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
357 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
358
359 spin_lock_init(&ctl->tree_lock);
360 ctl->unit = 1;
361 ctl->start = 0;
362 ctl->private = NULL;
363 ctl->op = &free_ino_op;
364
365 /*
 366 * Initially we allow 16K of ram to be used to cache chunks of
 367 * inode numbers before we resort to bitmaps. This is somewhat
 368 * arbitrary, but it will be adjusted at runtime.
369 */
370 ctl->extents_thresh = INIT_THRESHOLD;
371
372 spin_lock_init(&pinned->tree_lock);
373 pinned->unit = 1;
374 pinned->start = 0;
375 pinned->private = NULL;
376 pinned->extents_thresh = 0;
377 pinned->op = &pinned_free_ino_op;
378}
379
380int btrfs_save_ino_cache(struct btrfs_root *root,
381 struct btrfs_trans_handle *trans)
382{
383 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
384 struct btrfs_path *path;
385 struct inode *inode;
386 u64 alloc_hint = 0;
387 int ret;
388 int prealloc;
389 bool retry = false;
390
391 path = btrfs_alloc_path();
392 if (!path)
393 return -ENOMEM;
394again:
395 inode = lookup_free_ino_inode(root, path);
396 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
397 ret = PTR_ERR(inode);
398 goto out;
399 }
400
401 if (IS_ERR(inode)) {
402 BUG_ON(retry);
403 retry = true;
404
405 ret = create_free_ino_inode(root, trans, path);
406 if (ret)
407 goto out;
408 goto again;
409 }
410
411 BTRFS_I(inode)->generation = 0;
412 ret = btrfs_update_inode(trans, root, inode);
413 WARN_ON(ret);
414
415 if (i_size_read(inode) > 0) {
416 ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
417 if (ret)
418 goto out_put;
419 }
420
421 spin_lock(&root->cache_lock);
422 if (root->cached != BTRFS_CACHE_FINISHED) {
423 ret = -1;
424 spin_unlock(&root->cache_lock);
425 goto out_put;
426 }
427 spin_unlock(&root->cache_lock);
428
429 spin_lock(&ctl->tree_lock);
430 prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
431 prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
432 prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
433 spin_unlock(&ctl->tree_lock);
434
435 /* Just to make sure we have enough space */
436 prealloc += 8 * PAGE_CACHE_SIZE;
437
438 ret = btrfs_check_data_free_space(inode, prealloc);
439 if (ret)
440 goto out_put;
441
442 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
443 prealloc, prealloc, &alloc_hint);
444 if (ret)
445 goto out_put;
446 btrfs_free_reserved_data_space(inode, prealloc);
447
448out_put:
449 iput(inode);
450out:
451 if (ret == 0)
452 ret = btrfs_write_out_ino_cache(root, trans, path);
453
454 btrfs_free_path(path);
455 return ret;
456}
457
458static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
24{ 459{
25 struct btrfs_path *path; 460 struct btrfs_path *path;
26 int ret; 461 int ret;
@@ -55,15 +490,14 @@ error:
55 return ret; 490 return ret;
56} 491}
57 492
58int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, 493int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
59 struct btrfs_root *root,
60 u64 dirid, u64 *objectid)
61{ 494{
62 int ret; 495 int ret;
63 mutex_lock(&root->objectid_mutex); 496 mutex_lock(&root->objectid_mutex);
64 497
65 if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { 498 if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
66 ret = btrfs_find_highest_inode(root, &root->highest_objectid); 499 ret = btrfs_find_highest_objectid(root,
500 &root->highest_objectid);
67 if (ret) 501 if (ret)
68 goto out; 502 goto out;
69 } 503 }
diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
new file mode 100644
index 000000000000..ddb347bfee23
--- /dev/null
+++ b/fs/btrfs/inode-map.h
@@ -0,0 +1,13 @@
1#ifndef __BTRFS_INODE_MAP
2#define __BTRFS_INODE_MAP
3
4void btrfs_init_free_ino_ctl(struct btrfs_root *root);
5void btrfs_unpin_free_ino(struct btrfs_root *root);
6void btrfs_return_ino(struct btrfs_root *root, u64 objectid);
7int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid);
8int btrfs_save_ino_cache(struct btrfs_root *root,
9 struct btrfs_trans_handle *trans);
10
11int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
12
13#endif
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7cd8ab0ef04d..bb51bb1fa44f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -37,6 +37,7 @@
37#include <linux/posix_acl.h> 37#include <linux/posix_acl.h>
38#include <linux/falloc.h> 38#include <linux/falloc.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/ratelimit.h>
40#include "compat.h" 41#include "compat.h"
41#include "ctree.h" 42#include "ctree.h"
42#include "disk-io.h" 43#include "disk-io.h"
@@ -51,6 +52,7 @@
51#include "compression.h" 52#include "compression.h"
52#include "locking.h" 53#include "locking.h"
53#include "free-space-cache.h" 54#include "free-space-cache.h"
55#include "inode-map.h"
54 56
55struct btrfs_iget_args { 57struct btrfs_iget_args {
56 u64 ino; 58 u64 ino;
@@ -138,7 +140,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
138 path->leave_spinning = 1; 140 path->leave_spinning = 1;
139 btrfs_set_trans_block_group(trans, inode); 141 btrfs_set_trans_block_group(trans, inode);
140 142
141 key.objectid = inode->i_ino; 143 key.objectid = btrfs_ino(inode);
142 key.offset = start; 144 key.offset = start;
143 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); 145 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
144 datasize = btrfs_file_extent_calc_inline_size(cur_size); 146 datasize = btrfs_file_extent_calc_inline_size(cur_size);
@@ -340,6 +342,10 @@ static noinline int compress_file_range(struct inode *inode,
340 int will_compress; 342 int will_compress;
341 int compress_type = root->fs_info->compress_type; 343 int compress_type = root->fs_info->compress_type;
342 344
345 /* if this is a small write inside eof, kick off a defragbot */
346 if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
347 btrfs_add_inode_defrag(NULL, inode);
348
343 actual_end = min_t(u64, isize, end + 1); 349 actual_end = min_t(u64, isize, end + 1);
344again: 350again:
345 will_compress = 0; 351 will_compress = 0;
@@ -649,7 +655,7 @@ retry:
649 async_extent->start + 655 async_extent->start +
650 async_extent->ram_size - 1, 0); 656 async_extent->ram_size - 1, 0);
651 657
652 em = alloc_extent_map(GFP_NOFS); 658 em = alloc_extent_map();
653 BUG_ON(!em); 659 BUG_ON(!em);
654 em->start = async_extent->start; 660 em->start = async_extent->start;
655 em->len = async_extent->ram_size; 661 em->len = async_extent->ram_size;
@@ -745,6 +751,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
745 return alloc_hint; 751 return alloc_hint;
746} 752}
747 753
754static inline bool is_free_space_inode(struct btrfs_root *root,
755 struct inode *inode)
756{
757 if (root == root->fs_info->tree_root ||
758 BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
759 return true;
760 return false;
761}
762
748/* 763/*
749 * when extent_io.c finds a delayed allocation range in the file, 764 * when extent_io.c finds a delayed allocation range in the file,
750 * the call backs end up in this code. The basic idea is to 765 * the call backs end up in this code. The basic idea is to
@@ -777,7 +792,7 @@ static noinline int cow_file_range(struct inode *inode,
777 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 792 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
778 int ret = 0; 793 int ret = 0;
779 794
780 BUG_ON(root == root->fs_info->tree_root); 795 BUG_ON(is_free_space_inode(root, inode));
781 trans = btrfs_join_transaction(root, 1); 796 trans = btrfs_join_transaction(root, 1);
782 BUG_ON(IS_ERR(trans)); 797 BUG_ON(IS_ERR(trans));
783 btrfs_set_trans_block_group(trans, inode); 798 btrfs_set_trans_block_group(trans, inode);
@@ -788,6 +803,10 @@ static noinline int cow_file_range(struct inode *inode,
788 disk_num_bytes = num_bytes; 803 disk_num_bytes = num_bytes;
789 ret = 0; 804 ret = 0;
790 805
806 /* if this is a small write inside eof, kick off defrag */
807 if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024)
808 btrfs_add_inode_defrag(trans, inode);
809
791 if (start == 0) { 810 if (start == 0) {
792 /* lets try to make an inline extent */ 811 /* lets try to make an inline extent */
793 ret = cow_file_range_inline(trans, root, inode, 812 ret = cow_file_range_inline(trans, root, inode,
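
Both this hunk and the earlier one in compress_file_range() gate auto-defrag on the same shape of check: the write ends inside the on-disk i_size and is smaller than a threshold (64 KiB on the byte count here, 16 KiB in the compressed path). A hypothetical restatement of that predicate outside the kernel, for clarity only:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: a small write that lands entirely inside the existing
 * on-disk i_size marks the inode as an auto-defrag candidate. */
static bool small_write_inside_eof(uint64_t start, uint64_t end,
				   uint64_t disk_i_size, uint64_t threshold)
{
	return end <= disk_i_size && (end - start + 1) < threshold;
}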
@@ -826,7 +845,7 @@ static noinline int cow_file_range(struct inode *inode,
826 (u64)-1, &ins, 1); 845 (u64)-1, &ins, 1);
827 BUG_ON(ret); 846 BUG_ON(ret);
828 847
829 em = alloc_extent_map(GFP_NOFS); 848 em = alloc_extent_map();
830 BUG_ON(!em); 849 BUG_ON(!em);
831 em->start = start; 850 em->start = start;
832 em->orig_start = em->start; 851 em->orig_start = em->start;
@@ -1008,7 +1027,7 @@ static noinline int csum_exist_in_range(struct btrfs_root *root,
1008 LIST_HEAD(list); 1027 LIST_HEAD(list);
1009 1028
1010 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, 1029 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
1011 bytenr + num_bytes - 1, &list); 1030 bytenr + num_bytes - 1, &list, 0);
1012 if (ret == 0 && list_empty(&list)) 1031 if (ret == 0 && list_empty(&list))
1013 return 0; 1032 return 0;
1014 1033
@@ -1049,29 +1068,31 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1049 int type; 1068 int type;
1050 int nocow; 1069 int nocow;
1051 int check_prev = 1; 1070 int check_prev = 1;
1052 bool nolock = false; 1071 bool nolock;
1072 u64 ino = btrfs_ino(inode);
1053 1073
1054 path = btrfs_alloc_path(); 1074 path = btrfs_alloc_path();
1055 BUG_ON(!path); 1075 BUG_ON(!path);
1056 if (root == root->fs_info->tree_root) { 1076
1057 nolock = true; 1077 nolock = is_free_space_inode(root, inode);
1078
1079 if (nolock)
1058 trans = btrfs_join_transaction_nolock(root, 1); 1080 trans = btrfs_join_transaction_nolock(root, 1);
1059 } else { 1081 else
1060 trans = btrfs_join_transaction(root, 1); 1082 trans = btrfs_join_transaction(root, 1);
1061 }
1062 BUG_ON(IS_ERR(trans)); 1083 BUG_ON(IS_ERR(trans));
1063 1084
1064 cow_start = (u64)-1; 1085 cow_start = (u64)-1;
1065 cur_offset = start; 1086 cur_offset = start;
1066 while (1) { 1087 while (1) {
1067 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, 1088 ret = btrfs_lookup_file_extent(trans, root, path, ino,
1068 cur_offset, 0); 1089 cur_offset, 0);
1069 BUG_ON(ret < 0); 1090 BUG_ON(ret < 0);
1070 if (ret > 0 && path->slots[0] > 0 && check_prev) { 1091 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1071 leaf = path->nodes[0]; 1092 leaf = path->nodes[0];
1072 btrfs_item_key_to_cpu(leaf, &found_key, 1093 btrfs_item_key_to_cpu(leaf, &found_key,
1073 path->slots[0] - 1); 1094 path->slots[0] - 1);
1074 if (found_key.objectid == inode->i_ino && 1095 if (found_key.objectid == ino &&
1075 found_key.type == BTRFS_EXTENT_DATA_KEY) 1096 found_key.type == BTRFS_EXTENT_DATA_KEY)
1076 path->slots[0]--; 1097 path->slots[0]--;
1077 } 1098 }
@@ -1092,7 +1113,7 @@ next_slot:
1092 num_bytes = 0; 1113 num_bytes = 0;
1093 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1114 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1094 1115
1095 if (found_key.objectid > inode->i_ino || 1116 if (found_key.objectid > ino ||
1096 found_key.type > BTRFS_EXTENT_DATA_KEY || 1117 found_key.type > BTRFS_EXTENT_DATA_KEY ||
1097 found_key.offset > end) 1118 found_key.offset > end)
1098 break; 1119 break;
@@ -1127,7 +1148,7 @@ next_slot:
1127 goto out_check; 1148 goto out_check;
1128 if (btrfs_extent_readonly(root, disk_bytenr)) 1149 if (btrfs_extent_readonly(root, disk_bytenr))
1129 goto out_check; 1150 goto out_check;
1130 if (btrfs_cross_ref_exist(trans, root, inode->i_ino, 1151 if (btrfs_cross_ref_exist(trans, root, ino,
1131 found_key.offset - 1152 found_key.offset -
1132 extent_offset, disk_bytenr)) 1153 extent_offset, disk_bytenr))
1133 goto out_check; 1154 goto out_check;
@@ -1164,7 +1185,7 @@ out_check:
1164 goto next_slot; 1185 goto next_slot;
1165 } 1186 }
1166 1187
1167 btrfs_release_path(root, path); 1188 btrfs_release_path(path);
1168 if (cow_start != (u64)-1) { 1189 if (cow_start != (u64)-1) {
1169 ret = cow_file_range(inode, locked_page, cow_start, 1190 ret = cow_file_range(inode, locked_page, cow_start,
1170 found_key.offset - 1, page_started, 1191 found_key.offset - 1, page_started,
@@ -1177,7 +1198,7 @@ out_check:
1177 struct extent_map *em; 1198 struct extent_map *em;
1178 struct extent_map_tree *em_tree; 1199 struct extent_map_tree *em_tree;
1179 em_tree = &BTRFS_I(inode)->extent_tree; 1200 em_tree = &BTRFS_I(inode)->extent_tree;
1180 em = alloc_extent_map(GFP_NOFS); 1201 em = alloc_extent_map();
1181 BUG_ON(!em); 1202 BUG_ON(!em);
1182 em->start = cur_offset; 1203 em->start = cur_offset;
1183 em->orig_start = em->start; 1204 em->orig_start = em->start;
@@ -1222,7 +1243,7 @@ out_check:
1222 if (cur_offset > end) 1243 if (cur_offset > end)
1223 break; 1244 break;
1224 } 1245 }
1225 btrfs_release_path(root, path); 1246 btrfs_release_path(path);
1226 1247
1227 if (cur_offset <= end && cow_start == (u64)-1) 1248 if (cur_offset <= end && cow_start == (u64)-1)
1228 cow_start = cur_offset; 1249 cow_start = cur_offset;
@@ -1310,14 +1331,13 @@ static int btrfs_set_bit_hook(struct inode *inode,
1310 1331
1311 /* 1332 /*
1312 * set_bit and clear bit hooks normally require _irqsave/restore 1333 * set_bit and clear bit hooks normally require _irqsave/restore
1313 * but in this case, we are only testeing for the DELALLOC 1334 * but in this case, we are only testing for the DELALLOC
1314 * bit, which is only set or cleared with irqs on 1335 * bit, which is only set or cleared with irqs on
1315 */ 1336 */
1316 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 1337 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1317 struct btrfs_root *root = BTRFS_I(inode)->root; 1338 struct btrfs_root *root = BTRFS_I(inode)->root;
1318 u64 len = state->end + 1 - state->start; 1339 u64 len = state->end + 1 - state->start;
1319 int do_list = (root->root_key.objectid != 1340 bool do_list = !is_free_space_inode(root, inode);
1320 BTRFS_ROOT_TREE_OBJECTID);
1321 1341
1322 if (*bits & EXTENT_FIRST_DELALLOC) 1342 if (*bits & EXTENT_FIRST_DELALLOC)
1323 *bits &= ~EXTENT_FIRST_DELALLOC; 1343 *bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1344,14 +1364,13 @@ static int btrfs_clear_bit_hook(struct inode *inode,
1344{ 1364{
1345 /* 1365 /*
1346 * set_bit and clear bit hooks normally require _irqsave/restore 1366 * set_bit and clear bit hooks normally require _irqsave/restore
1347 * but in this case, we are only testeing for the DELALLOC 1367 * but in this case, we are only testing for the DELALLOC
1348 * bit, which is only set or cleared with irqs on 1368 * bit, which is only set or cleared with irqs on
1349 */ 1369 */
1350 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 1370 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1351 struct btrfs_root *root = BTRFS_I(inode)->root; 1371 struct btrfs_root *root = BTRFS_I(inode)->root;
1352 u64 len = state->end + 1 - state->start; 1372 u64 len = state->end + 1 - state->start;
1353 int do_list = (root->root_key.objectid != 1373 bool do_list = !is_free_space_inode(root, inode);
1354 BTRFS_ROOT_TREE_OBJECTID);
1355 1374
1356 if (*bits & EXTENT_FIRST_DELALLOC) 1375 if (*bits & EXTENT_FIRST_DELALLOC)
1357 *bits &= ~EXTENT_FIRST_DELALLOC; 1376 *bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1458,7 +1477,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1458 1477
1459 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 1478 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1460 1479
1461 if (root == root->fs_info->tree_root) 1480 if (is_free_space_inode(root, inode))
1462 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); 1481 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
1463 else 1482 else
1464 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 1483 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
@@ -1644,7 +1663,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1644 &hint, 0); 1663 &hint, 0);
1645 BUG_ON(ret); 1664 BUG_ON(ret);
1646 1665
1647 ins.objectid = inode->i_ino; 1666 ins.objectid = btrfs_ino(inode);
1648 ins.offset = file_pos; 1667 ins.offset = file_pos;
1649 ins.type = BTRFS_EXTENT_DATA_KEY; 1668 ins.type = BTRFS_EXTENT_DATA_KEY;
1650 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); 1669 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
@@ -1675,7 +1694,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1675 ins.type = BTRFS_EXTENT_ITEM_KEY; 1694 ins.type = BTRFS_EXTENT_ITEM_KEY;
1676 ret = btrfs_alloc_reserved_file_extent(trans, root, 1695 ret = btrfs_alloc_reserved_file_extent(trans, root,
1677 root->root_key.objectid, 1696 root->root_key.objectid,
1678 inode->i_ino, file_pos, &ins); 1697 btrfs_ino(inode), file_pos, &ins);
1679 BUG_ON(ret); 1698 BUG_ON(ret);
1680 btrfs_free_path(path); 1699 btrfs_free_path(path);
1681 1700
@@ -1701,7 +1720,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1701 struct extent_state *cached_state = NULL; 1720 struct extent_state *cached_state = NULL;
1702 int compress_type = 0; 1721 int compress_type = 0;
1703 int ret; 1722 int ret;
1704 bool nolock = false; 1723 bool nolock;
1705 1724
1706 ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, 1725 ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1707 end - start + 1); 1726 end - start + 1);
@@ -1709,7 +1728,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1709 return 0; 1728 return 0;
1710 BUG_ON(!ordered_extent); 1729 BUG_ON(!ordered_extent);
1711 1730
1712 nolock = (root == root->fs_info->tree_root); 1731 nolock = is_free_space_inode(root, inode);
1713 1732
1714 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1733 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1715 BUG_ON(!list_empty(&ordered_extent->list)); 1734 BUG_ON(!list_empty(&ordered_extent->list));
@@ -1855,7 +1874,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
1855 } 1874 }
1856 read_unlock(&em_tree->lock); 1875 read_unlock(&em_tree->lock);
1857 1876
1858 if (!em || IS_ERR(em)) { 1877 if (IS_ERR_OR_NULL(em)) {
1859 kfree(failrec); 1878 kfree(failrec);
1860 return -EIO; 1879 return -EIO;
1861 } 1880 }
@@ -2004,12 +2023,11 @@ good:
2004 return 0; 2023 return 0;
2005 2024
2006zeroit: 2025zeroit:
2007 if (printk_ratelimit()) { 2026 printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
2008 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " 2027 "private %llu\n",
2009 "private %llu\n", page->mapping->host->i_ino, 2028 (unsigned long long)btrfs_ino(page->mapping->host),
2010 (unsigned long long)start, csum, 2029 (unsigned long long)start, csum,
2011 (unsigned long long)private); 2030 (unsigned long long)private);
2012 }
2013 memset(kaddr + offset, 1, end - start + 1); 2031 memset(kaddr + offset, 1, end - start + 1);
2014 flush_dcache_page(page); 2032 flush_dcache_page(page);
2015 kunmap_atomic(kaddr, KM_USER0); 2033 kunmap_atomic(kaddr, KM_USER0);
@@ -2244,7 +2262,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2244 2262
2245 /* insert an orphan item to track this unlinked/truncated file */ 2263 /* insert an orphan item to track this unlinked/truncated file */
2246 if (insert >= 1) { 2264 if (insert >= 1) {
2247 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); 2265 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2248 BUG_ON(ret); 2266 BUG_ON(ret);
2249 } 2267 }
2250 2268
@@ -2281,7 +2299,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2281 spin_unlock(&root->orphan_lock); 2299 spin_unlock(&root->orphan_lock);
2282 2300
2283 if (trans && delete_item) { 2301 if (trans && delete_item) {
2284 ret = btrfs_del_orphan_item(trans, root, inode->i_ino); 2302 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2285 BUG_ON(ret); 2303 BUG_ON(ret);
2286 } 2304 }
2287 2305
@@ -2346,7 +2364,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2346 break; 2364 break;
2347 2365
2348 /* release the path since we're done with it */ 2366 /* release the path since we're done with it */
2349 btrfs_release_path(root, path); 2367 btrfs_release_path(path);
2350 2368
2351 /* 2369 /*
2352 * this is where we are basically btrfs_lookup, without the 2370 * this is where we are basically btrfs_lookup, without the
@@ -2543,7 +2561,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
2543 * try to precache a NULL acl entry for files that don't have 2561 * try to precache a NULL acl entry for files that don't have
2544 * any xattrs or acls 2562 * any xattrs or acls
2545 */ 2563 */
2546 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino); 2564 maybe_acls = acls_after_inode_item(leaf, path->slots[0],
2565 btrfs_ino(inode));
2547 if (!maybe_acls) 2566 if (!maybe_acls)
2548 cache_no_acl(inode); 2567 cache_no_acl(inode);
2549 2568
@@ -2647,11 +2666,26 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2647 struct extent_buffer *leaf; 2666 struct extent_buffer *leaf;
2648 int ret; 2667 int ret;
2649 2668
2669 /*
2670 * If root is the tree root, this inode is used to store free
2671 * space information. Such inodes are updated when the transaction
2672 * is committed, so their updates must not be delayed, or a
2673 * deadlock will occur.
2674 */
2675 if (!is_free_space_inode(root, inode)) {
2676 ret = btrfs_delayed_update_inode(trans, root, inode);
2677 if (!ret)
2678 btrfs_set_inode_last_trans(trans, inode);
2679 return ret;
2680 }
2681
2650 path = btrfs_alloc_path(); 2682 path = btrfs_alloc_path();
2651 BUG_ON(!path); 2683 if (!path)
2684 return -ENOMEM;
2685
2652 path->leave_spinning = 1; 2686 path->leave_spinning = 1;
2653 ret = btrfs_lookup_inode(trans, root, path, 2687 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
2654 &BTRFS_I(inode)->location, 1); 2688 1);
2655 if (ret) { 2689 if (ret) {
2656 if (ret > 0) 2690 if (ret > 0)
2657 ret = -ENOENT; 2691 ret = -ENOENT;
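
The check added above routes ordinary inodes through btrfs_delayed_update_inode(), while free-space inodes (which are written out at transaction commit) keep taking the direct btree path, since delaying them would deadlock against the commit. A rough userspace model of why batching the metadata update helps — repeated dirtying of the same inode collapses into a single flush — is sketched below; none of these names exist in the kernel.

#include <stdbool.h>
#include <stdio.h>

/* Toy model only: dirty an in-memory inode many times, write it out once. */
struct toy_inode {
	unsigned long long ino;
	unsigned long long size;
	bool delayed_dirty;		/* has a pending (coalesced) update */
	bool free_space_inode;
};

static int btree_writes;

static void write_inode_item(struct toy_inode *inode)
{
	(void)inode;
	btree_writes++;			/* stands in for a real btree update */
}

static void update_inode(struct toy_inode *inode)
{
	if (inode->free_space_inode) {
		write_inode_item(inode);	/* must stay synchronous */
		return;
	}
	inode->delayed_dirty = true;		/* coalesced until commit */
}

static void commit_transaction(struct toy_inode *inodes, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (!inodes[i].delayed_dirty)
			continue;
		write_inode_item(&inodes[i]);
		inodes[i].delayed_dirty = false;
	}
}

int main(void)
{
	struct toy_inode inodes[1] = { { .ino = 257 } };
	int i;

	for (i = 0; i < 1000; i++) {	/* a thousand metadata dirtyings ... */
		inodes[0].size += 4096;
		update_inode(&inodes[0]);
	}
	commit_transaction(inodes, 1);	/* ... but a single btree write */
	printf("btree writes: %d\n", btree_writes);
	return 0;
}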
@@ -2661,7 +2695,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2661 btrfs_unlock_up_safe(path, 1); 2695 btrfs_unlock_up_safe(path, 1);
2662 leaf = path->nodes[0]; 2696 leaf = path->nodes[0];
2663 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2697 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2664 struct btrfs_inode_item); 2698 struct btrfs_inode_item);
2665 2699
2666 fill_inode_item(trans, leaf, inode_item, inode); 2700 fill_inode_item(trans, leaf, inode_item, inode);
2667 btrfs_mark_buffer_dirty(leaf); 2701 btrfs_mark_buffer_dirty(leaf);
@@ -2672,7 +2706,6 @@ failed:
2672 return ret; 2706 return ret;
2673} 2707}
2674 2708
2675
2676/* 2709/*
2677 * unlink helper that gets used here in inode.c and in the tree logging 2710 * unlink helper that gets used here in inode.c and in the tree logging
2678 * recovery code. It remove a link in a directory with a given name, and 2711 * recovery code. It remove a link in a directory with a given name, and
@@ -2689,6 +2722,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2689 struct btrfs_dir_item *di; 2722 struct btrfs_dir_item *di;
2690 struct btrfs_key key; 2723 struct btrfs_key key;
2691 u64 index; 2724 u64 index;
2725 u64 ino = btrfs_ino(inode);
2726 u64 dir_ino = btrfs_ino(dir);
2692 2727
2693 path = btrfs_alloc_path(); 2728 path = btrfs_alloc_path();
2694 if (!path) { 2729 if (!path) {
@@ -2697,7 +2732,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2697 } 2732 }
2698 2733
2699 path->leave_spinning = 1; 2734 path->leave_spinning = 1;
2700 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, 2735 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2701 name, name_len, -1); 2736 name, name_len, -1);
2702 if (IS_ERR(di)) { 2737 if (IS_ERR(di)) {
2703 ret = PTR_ERR(di); 2738 ret = PTR_ERR(di);
@@ -2712,33 +2747,23 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2712 ret = btrfs_delete_one_dir_name(trans, root, path, di); 2747 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2713 if (ret) 2748 if (ret)
2714 goto err; 2749 goto err;
2715 btrfs_release_path(root, path); 2750 btrfs_release_path(path);
2716 2751
2717 ret = btrfs_del_inode_ref(trans, root, name, name_len, 2752 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
2718 inode->i_ino, 2753 dir_ino, &index);
2719 dir->i_ino, &index);
2720 if (ret) { 2754 if (ret) {
2721 printk(KERN_INFO "btrfs failed to delete reference to %.*s, " 2755 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2722 "inode %lu parent %lu\n", name_len, name, 2756 "inode %llu parent %llu\n", name_len, name,
2723 inode->i_ino, dir->i_ino); 2757 (unsigned long long)ino, (unsigned long long)dir_ino);
2724 goto err; 2758 goto err;
2725 } 2759 }
2726 2760
2727 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, 2761 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2728 index, name, name_len, -1); 2762 if (ret)
2729 if (IS_ERR(di)) {
2730 ret = PTR_ERR(di);
2731 goto err;
2732 }
2733 if (!di) {
2734 ret = -ENOENT;
2735 goto err; 2763 goto err;
2736 }
2737 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2738 btrfs_release_path(root, path);
2739 2764
2740 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, 2765 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2741 inode, dir->i_ino); 2766 inode, dir_ino);
2742 BUG_ON(ret != 0 && ret != -ENOENT); 2767 BUG_ON(ret != 0 && ret != -ENOENT);
2743 2768
2744 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, 2769 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
@@ -2816,12 +2841,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2816 int check_link = 1; 2841 int check_link = 1;
2817 int err = -ENOSPC; 2842 int err = -ENOSPC;
2818 int ret; 2843 int ret;
2844 u64 ino = btrfs_ino(inode);
2845 u64 dir_ino = btrfs_ino(dir);
2819 2846
2820 trans = btrfs_start_transaction(root, 10); 2847 trans = btrfs_start_transaction(root, 10);
2821 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) 2848 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
2822 return trans; 2849 return trans;
2823 2850
2824 if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 2851 if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2825 return ERR_PTR(-ENOSPC); 2852 return ERR_PTR(-ENOSPC);
2826 2853
2827 /* check if there is someone else holds reference */ 2854 /* check if there is someone else holds reference */
@@ -2862,7 +2889,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2862 } else { 2889 } else {
2863 check_link = 0; 2890 check_link = 0;
2864 } 2891 }
2865 btrfs_release_path(root, path); 2892 btrfs_release_path(path);
2866 2893
2867 ret = btrfs_lookup_inode(trans, root, path, 2894 ret = btrfs_lookup_inode(trans, root, path,
2868 &BTRFS_I(inode)->location, 0); 2895 &BTRFS_I(inode)->location, 0);
@@ -2876,11 +2903,11 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2876 } else { 2903 } else {
2877 check_link = 0; 2904 check_link = 0;
2878 } 2905 }
2879 btrfs_release_path(root, path); 2906 btrfs_release_path(path);
2880 2907
2881 if (ret == 0 && S_ISREG(inode->i_mode)) { 2908 if (ret == 0 && S_ISREG(inode->i_mode)) {
2882 ret = btrfs_lookup_file_extent(trans, root, path, 2909 ret = btrfs_lookup_file_extent(trans, root, path,
2883 inode->i_ino, (u64)-1, 0); 2910 ino, (u64)-1, 0);
2884 if (ret < 0) { 2911 if (ret < 0) {
2885 err = ret; 2912 err = ret;
2886 goto out; 2913 goto out;
@@ -2888,7 +2915,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2888 BUG_ON(ret == 0); 2915 BUG_ON(ret == 0);
2889 if (check_path_shared(root, path)) 2916 if (check_path_shared(root, path))
2890 goto out; 2917 goto out;
2891 btrfs_release_path(root, path); 2918 btrfs_release_path(path);
2892 } 2919 }
2893 2920
2894 if (!check_link) { 2921 if (!check_link) {
@@ -2896,7 +2923,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2896 goto out; 2923 goto out;
2897 } 2924 }
2898 2925
2899 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, 2926 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2900 dentry->d_name.name, dentry->d_name.len, 0); 2927 dentry->d_name.name, dentry->d_name.len, 0);
2901 if (IS_ERR(di)) { 2928 if (IS_ERR(di)) {
2902 err = PTR_ERR(di); 2929 err = PTR_ERR(di);
@@ -2909,11 +2936,11 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2909 err = 0; 2936 err = 0;
2910 goto out; 2937 goto out;
2911 } 2938 }
2912 btrfs_release_path(root, path); 2939 btrfs_release_path(path);
2913 2940
2914 ref = btrfs_lookup_inode_ref(trans, root, path, 2941 ref = btrfs_lookup_inode_ref(trans, root, path,
2915 dentry->d_name.name, dentry->d_name.len, 2942 dentry->d_name.name, dentry->d_name.len,
2916 inode->i_ino, dir->i_ino, 0); 2943 ino, dir_ino, 0);
2917 if (IS_ERR(ref)) { 2944 if (IS_ERR(ref)) {
2918 err = PTR_ERR(ref); 2945 err = PTR_ERR(ref);
2919 goto out; 2946 goto out;
@@ -2922,9 +2949,17 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2922 if (check_path_shared(root, path)) 2949 if (check_path_shared(root, path))
2923 goto out; 2950 goto out;
2924 index = btrfs_inode_ref_index(path->nodes[0], ref); 2951 index = btrfs_inode_ref_index(path->nodes[0], ref);
2925 btrfs_release_path(root, path); 2952 btrfs_release_path(path);
2926 2953
2927 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, 2954 /*
2955 * This is a commit root search: if we can look up the inode item and
2956 * other related items in the commit root, the transaction that created
2957 * the dir/file has been committed, and the dir index item whose
2958 * insertion we delayed has also been inserted into the commit root.
2959 * So we needn't worry about the delayed insertion of the dir index
2960 * item here.
2961 */
2962 di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
2928 dentry->d_name.name, dentry->d_name.len, 0); 2963 dentry->d_name.name, dentry->d_name.len, 0);
2929 if (IS_ERR(di)) { 2964 if (IS_ERR(di)) {
2930 err = PTR_ERR(di); 2965 err = PTR_ERR(di);
@@ -2999,54 +3034,47 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2999 struct btrfs_key key; 3034 struct btrfs_key key;
3000 u64 index; 3035 u64 index;
3001 int ret; 3036 int ret;
3037 u64 dir_ino = btrfs_ino(dir);
3002 3038
3003 path = btrfs_alloc_path(); 3039 path = btrfs_alloc_path();
3004 if (!path) 3040 if (!path)
3005 return -ENOMEM; 3041 return -ENOMEM;
3006 3042
3007 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, 3043 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3008 name, name_len, -1); 3044 name, name_len, -1);
3009 BUG_ON(!di || IS_ERR(di)); 3045 BUG_ON(IS_ERR_OR_NULL(di));
3010 3046
3011 leaf = path->nodes[0]; 3047 leaf = path->nodes[0];
3012 btrfs_dir_item_key_to_cpu(leaf, di, &key); 3048 btrfs_dir_item_key_to_cpu(leaf, di, &key);
3013 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 3049 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3014 ret = btrfs_delete_one_dir_name(trans, root, path, di); 3050 ret = btrfs_delete_one_dir_name(trans, root, path, di);
3015 BUG_ON(ret); 3051 BUG_ON(ret);
3016 btrfs_release_path(root, path); 3052 btrfs_release_path(path);
3017 3053
3018 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 3054 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3019 objectid, root->root_key.objectid, 3055 objectid, root->root_key.objectid,
3020 dir->i_ino, &index, name, name_len); 3056 dir_ino, &index, name, name_len);
3021 if (ret < 0) { 3057 if (ret < 0) {
3022 BUG_ON(ret != -ENOENT); 3058 BUG_ON(ret != -ENOENT);
3023 di = btrfs_search_dir_index_item(root, path, dir->i_ino, 3059 di = btrfs_search_dir_index_item(root, path, dir_ino,
3024 name, name_len); 3060 name, name_len);
3025 BUG_ON(!di || IS_ERR(di)); 3061 BUG_ON(IS_ERR_OR_NULL(di));
3026 3062
3027 leaf = path->nodes[0]; 3063 leaf = path->nodes[0];
3028 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3064 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3029 btrfs_release_path(root, path); 3065 btrfs_release_path(path);
3030 index = key.offset; 3066 index = key.offset;
3031 } 3067 }
3068 btrfs_release_path(path);
3032 3069
3033 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, 3070 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3034 index, name, name_len, -1);
3035 BUG_ON(!di || IS_ERR(di));
3036
3037 leaf = path->nodes[0];
3038 btrfs_dir_item_key_to_cpu(leaf, di, &key);
3039 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3040 ret = btrfs_delete_one_dir_name(trans, root, path, di);
3041 BUG_ON(ret); 3071 BUG_ON(ret);
3042 btrfs_release_path(root, path);
3043 3072
3044 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 3073 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3045 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 3074 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3046 ret = btrfs_update_inode(trans, root, dir); 3075 ret = btrfs_update_inode(trans, root, dir);
3047 BUG_ON(ret); 3076 BUG_ON(ret);
3048 3077
3049 btrfs_free_path(path);
3050 return 0; 3078 return 0;
3051} 3079}
3052 3080
@@ -3059,7 +3087,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3059 unsigned long nr = 0; 3087 unsigned long nr = 0;
3060 3088
3061 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || 3089 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
3062 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 3090 btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3063 return -ENOTEMPTY; 3091 return -ENOTEMPTY;
3064 3092
3065 trans = __unlink_start_trans(dir, dentry); 3093 trans = __unlink_start_trans(dir, dentry);
@@ -3068,7 +3096,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3068 3096
3069 btrfs_set_trans_block_group(trans, dir); 3097 btrfs_set_trans_block_group(trans, dir);
3070 3098
3071 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 3099 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3072 err = btrfs_unlink_subvol(trans, root, dir, 3100 err = btrfs_unlink_subvol(trans, root, dir,
3073 BTRFS_I(inode)->location.objectid, 3101 BTRFS_I(inode)->location.objectid,
3074 dentry->d_name.name, 3102 dentry->d_name.name,
@@ -3093,178 +3121,6 @@ out:
3093 return err; 3121 return err;
3094} 3122}
3095 3123
3096#if 0
3097/*
3098 * when truncating bytes in a file, it is possible to avoid reading
3099 * the leaves that contain only checksum items. This can be the
3100 * majority of the IO required to delete a large file, but it must
3101 * be done carefully.
3102 *
3103 * The keys in the level just above the leaves are checked to make sure
3104 * the lowest key in a given leaf is a csum key, and starts at an offset
3105 * after the new size.
3106 *
3107 * Then the key for the next leaf is checked to make sure it also has
3108 * a checksum item for the same file. If it does, we know our target leaf
3109 * contains only checksum items, and it can be safely freed without reading
3110 * it.
3111 *
3112 * This is just an optimization targeted at large files. It may do
3113 * nothing. It will return 0 unless things went badly.
3114 */
3115static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
3116 struct btrfs_root *root,
3117 struct btrfs_path *path,
3118 struct inode *inode, u64 new_size)
3119{
3120 struct btrfs_key key;
3121 int ret;
3122 int nritems;
3123 struct btrfs_key found_key;
3124 struct btrfs_key other_key;
3125 struct btrfs_leaf_ref *ref;
3126 u64 leaf_gen;
3127 u64 leaf_start;
3128
3129 path->lowest_level = 1;
3130 key.objectid = inode->i_ino;
3131 key.type = BTRFS_CSUM_ITEM_KEY;
3132 key.offset = new_size;
3133again:
3134 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3135 if (ret < 0)
3136 goto out;
3137
3138 if (path->nodes[1] == NULL) {
3139 ret = 0;
3140 goto out;
3141 }
3142 ret = 0;
3143 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
3144 nritems = btrfs_header_nritems(path->nodes[1]);
3145
3146 if (!nritems)
3147 goto out;
3148
3149 if (path->slots[1] >= nritems)
3150 goto next_node;
3151
3152 /* did we find a key greater than anything we want to delete? */
3153 if (found_key.objectid > inode->i_ino ||
3154 (found_key.objectid == inode->i_ino && found_key.type > key.type))
3155 goto out;
3156
3157 /* we check the next key in the node to make sure the leave contains
3158 * only checksum items. This comparison doesn't work if our
3159 * leaf is the last one in the node
3160 */
3161 if (path->slots[1] + 1 >= nritems) {
3162next_node:
3163 /* search forward from the last key in the node, this
3164 * will bring us into the next node in the tree
3165 */
3166 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
3167
3168 /* unlikely, but we inc below, so check to be safe */
3169 if (found_key.offset == (u64)-1)
3170 goto out;
3171
3172 /* search_forward needs a path with locks held, do the
3173 * search again for the original key. It is possible
3174 * this will race with a balance and return a path that
3175 * we could modify, but this drop is just an optimization
3176 * and is allowed to miss some leaves.
3177 */
3178 btrfs_release_path(root, path);
3179 found_key.offset++;
3180
3181 /* setup a max key for search_forward */
3182 other_key.offset = (u64)-1;
3183 other_key.type = key.type;
3184 other_key.objectid = key.objectid;
3185
3186 path->keep_locks = 1;
3187 ret = btrfs_search_forward(root, &found_key, &other_key,
3188 path, 0, 0);
3189 path->keep_locks = 0;
3190 if (ret || found_key.objectid != key.objectid ||
3191 found_key.type != key.type) {
3192 ret = 0;
3193 goto out;
3194 }
3195
3196 key.offset = found_key.offset;
3197 btrfs_release_path(root, path);
3198 cond_resched();
3199 goto again;
3200 }
3201
3202 /* we know there's one more slot after us in the tree,
3203 * read that key so we can verify it is also a checksum item
3204 */
3205 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
3206
3207 if (found_key.objectid < inode->i_ino)
3208 goto next_key;
3209
3210 if (found_key.type != key.type || found_key.offset < new_size)
3211 goto next_key;
3212
3213 /*
3214 * if the key for the next leaf isn't a csum key from this objectid,
3215 * we can't be sure there aren't good items inside this leaf.
3216 * Bail out
3217 */
3218 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
3219 goto out;
3220
3221 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
3222 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
3223 /*
3224 * it is safe to delete this leaf, it contains only
3225 * csum items from this inode at an offset >= new_size
3226 */
3227 ret = btrfs_del_leaf(trans, root, path, leaf_start);
3228 BUG_ON(ret);
3229
3230 if (root->ref_cows && leaf_gen < trans->transid) {
3231 ref = btrfs_alloc_leaf_ref(root, 0);
3232 if (ref) {
3233 ref->root_gen = root->root_key.offset;
3234 ref->bytenr = leaf_start;
3235 ref->owner = 0;
3236 ref->generation = leaf_gen;
3237 ref->nritems = 0;
3238
3239 btrfs_sort_leaf_ref(ref);
3240
3241 ret = btrfs_add_leaf_ref(root, ref, 0);
3242 WARN_ON(ret);
3243 btrfs_free_leaf_ref(root, ref);
3244 } else {
3245 WARN_ON(1);
3246 }
3247 }
3248next_key:
3249 btrfs_release_path(root, path);
3250
3251 if (other_key.objectid == inode->i_ino &&
3252 other_key.type == key.type && other_key.offset > key.offset) {
3253 key.offset = other_key.offset;
3254 cond_resched();
3255 goto again;
3256 }
3257 ret = 0;
3258out:
3259 /* fixup any changes we've made to the path */
3260 path->lowest_level = 0;
3261 path->keep_locks = 0;
3262 btrfs_release_path(root, path);
3263 return ret;
3264}
3265
3266#endif
3267
3268/* 3124/*
3269 * this can truncate away extent items, csum items and directory items. 3125 * this can truncate away extent items, csum items and directory items.
3270 * It starts at a high offset and removes keys until it can't find 3126 * It starts at a high offset and removes keys until it can't find
@@ -3300,17 +3156,27 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3300 int encoding; 3156 int encoding;
3301 int ret; 3157 int ret;
3302 int err = 0; 3158 int err = 0;
3159 u64 ino = btrfs_ino(inode);
3303 3160
3304 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 3161 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3305 3162
3306 if (root->ref_cows || root == root->fs_info->tree_root) 3163 if (root->ref_cows || root == root->fs_info->tree_root)
3307 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); 3164 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
3308 3165
3166 /*
3167 * This function is also used to drop the items in the log tree before
3168 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
3169 * we are dropping the logged items, and we shouldn't kill the delayed
3170 * items.
3171 */
3172 if (min_type == 0 && root == BTRFS_I(inode)->root)
3173 btrfs_kill_delayed_inode_items(inode);
3174
3309 path = btrfs_alloc_path(); 3175 path = btrfs_alloc_path();
3310 BUG_ON(!path); 3176 BUG_ON(!path);
3311 path->reada = -1; 3177 path->reada = -1;
3312 3178
3313 key.objectid = inode->i_ino; 3179 key.objectid = ino;
3314 key.offset = (u64)-1; 3180 key.offset = (u64)-1;
3315 key.type = (u8)-1; 3181 key.type = (u8)-1;
3316 3182
@@ -3338,7 +3204,7 @@ search_again:
3338 found_type = btrfs_key_type(&found_key); 3204 found_type = btrfs_key_type(&found_key);
3339 encoding = 0; 3205 encoding = 0;
3340 3206
3341 if (found_key.objectid != inode->i_ino) 3207 if (found_key.objectid != ino)
3342 break; 3208 break;
3343 3209
3344 if (found_type < min_type) 3210 if (found_type < min_type)
@@ -3428,7 +3294,6 @@ search_again:
3428 btrfs_file_extent_calc_inline_size(size); 3294 btrfs_file_extent_calc_inline_size(size);
3429 ret = btrfs_truncate_item(trans, root, path, 3295 ret = btrfs_truncate_item(trans, root, path,
3430 size, 1); 3296 size, 1);
3431 BUG_ON(ret);
3432 } else if (root->ref_cows) { 3297 } else if (root->ref_cows) {
3433 inode_sub_bytes(inode, item_end + 1 - 3298 inode_sub_bytes(inode, item_end + 1 -
3434 found_key.offset); 3299 found_key.offset);
@@ -3457,7 +3322,7 @@ delete:
3457 ret = btrfs_free_extent(trans, root, extent_start, 3322 ret = btrfs_free_extent(trans, root, extent_start,
3458 extent_num_bytes, 0, 3323 extent_num_bytes, 0,
3459 btrfs_header_owner(leaf), 3324 btrfs_header_owner(leaf),
3460 inode->i_ino, extent_offset); 3325 ino, extent_offset);
3461 BUG_ON(ret); 3326 BUG_ON(ret);
3462 } 3327 }
3463 3328
@@ -3466,7 +3331,9 @@ delete:
3466 3331
3467 if (path->slots[0] == 0 || 3332 if (path->slots[0] == 0 ||
3468 path->slots[0] != pending_del_slot) { 3333 path->slots[0] != pending_del_slot) {
3469 if (root->ref_cows) { 3334 if (root->ref_cows &&
3335 BTRFS_I(inode)->location.objectid !=
3336 BTRFS_FREE_INO_OBJECTID) {
3470 err = -EAGAIN; 3337 err = -EAGAIN;
3471 goto out; 3338 goto out;
3472 } 3339 }
@@ -3477,7 +3344,7 @@ delete:
3477 BUG_ON(ret); 3344 BUG_ON(ret);
3478 pending_del_nr = 0; 3345 pending_del_nr = 0;
3479 } 3346 }
3480 btrfs_release_path(root, path); 3347 btrfs_release_path(path);
3481 goto search_again; 3348 goto search_again;
3482 } else { 3349 } else {
3483 path->slots[0]--; 3350 path->slots[0]--;
@@ -3635,7 +3502,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3635 while (1) { 3502 while (1) {
3636 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 3503 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3637 block_end - cur_offset, 0); 3504 block_end - cur_offset, 0);
3638 BUG_ON(IS_ERR(em) || !em); 3505 BUG_ON(IS_ERR_OR_NULL(em));
3639 last_byte = min(extent_map_end(em), block_end); 3506 last_byte = min(extent_map_end(em), block_end);
3640 last_byte = (last_byte + mask) & ~mask; 3507 last_byte = (last_byte + mask) & ~mask;
3641 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3508 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3656,7 +3523,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3656 break; 3523 break;
3657 3524
3658 err = btrfs_insert_file_extent(trans, root, 3525 err = btrfs_insert_file_extent(trans, root,
3659 inode->i_ino, cur_offset, 0, 3526 btrfs_ino(inode), cur_offset, 0,
3660 0, hole_size, 0, hole_size, 3527 0, hole_size, 0, hole_size,
3661 0, 0, 0); 3528 0, 0, 0);
3662 if (err) 3529 if (err)
@@ -3758,7 +3625,7 @@ void btrfs_evict_inode(struct inode *inode)
3758 3625
3759 truncate_inode_pages(&inode->i_data, 0); 3626 truncate_inode_pages(&inode->i_data, 0);
3760 if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || 3627 if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
3761 root == root->fs_info->tree_root)) 3628 is_free_space_inode(root, inode)))
3762 goto no_delete; 3629 goto no_delete;
3763 3630
3764 if (is_bad_inode(inode)) { 3631 if (is_bad_inode(inode)) {
@@ -3811,6 +3678,10 @@ void btrfs_evict_inode(struct inode *inode)
3811 BUG_ON(ret); 3678 BUG_ON(ret);
3812 } 3679 }
3813 3680
3681 if (!(root == root->fs_info->tree_root ||
3682 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
3683 btrfs_return_ino(root, btrfs_ino(inode));
3684
3814 nr = trans->blocks_used; 3685 nr = trans->blocks_used;
3815 btrfs_end_transaction(trans, root); 3686 btrfs_end_transaction(trans, root);
3816 btrfs_btree_balance_dirty(root, nr); 3687 btrfs_btree_balance_dirty(root, nr);
@@ -3836,12 +3707,12 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3836 path = btrfs_alloc_path(); 3707 path = btrfs_alloc_path();
3837 BUG_ON(!path); 3708 BUG_ON(!path);
3838 3709
3839 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name, 3710 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
3840 namelen, 0); 3711 namelen, 0);
3841 if (IS_ERR(di)) 3712 if (IS_ERR(di))
3842 ret = PTR_ERR(di); 3713 ret = PTR_ERR(di);
3843 3714
3844 if (!di || IS_ERR(di)) 3715 if (IS_ERR_OR_NULL(di))
3845 goto out_err; 3716 goto out_err;
3846 3717
3847 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 3718 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
@@ -3889,7 +3760,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
3889 3760
3890 leaf = path->nodes[0]; 3761 leaf = path->nodes[0];
3891 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 3762 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3892 if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino || 3763 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
3893 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 3764 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3894 goto out; 3765 goto out;
3895 3766
@@ -3899,7 +3770,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
3899 if (ret) 3770 if (ret)
3900 goto out; 3771 goto out;
3901 3772
3902 btrfs_release_path(root->fs_info->tree_root, path); 3773 btrfs_release_path(path);
3903 3774
3904 new_root = btrfs_read_fs_root_no_name(root->fs_info, location); 3775 new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3905 if (IS_ERR(new_root)) { 3776 if (IS_ERR(new_root)) {
@@ -3928,6 +3799,7 @@ static void inode_tree_add(struct inode *inode)
3928 struct btrfs_inode *entry; 3799 struct btrfs_inode *entry;
3929 struct rb_node **p; 3800 struct rb_node **p;
3930 struct rb_node *parent; 3801 struct rb_node *parent;
3802 u64 ino = btrfs_ino(inode);
3931again: 3803again:
3932 p = &root->inode_tree.rb_node; 3804 p = &root->inode_tree.rb_node;
3933 parent = NULL; 3805 parent = NULL;
@@ -3940,9 +3812,9 @@ again:
3940 parent = *p; 3812 parent = *p;
3941 entry = rb_entry(parent, struct btrfs_inode, rb_node); 3813 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3942 3814
3943 if (inode->i_ino < entry->vfs_inode.i_ino) 3815 if (ino < btrfs_ino(&entry->vfs_inode))
3944 p = &parent->rb_left; 3816 p = &parent->rb_left;
3945 else if (inode->i_ino > entry->vfs_inode.i_ino) 3817 else if (ino > btrfs_ino(&entry->vfs_inode))
3946 p = &parent->rb_right; 3818 p = &parent->rb_right;
3947 else { 3819 else {
3948 WARN_ON(!(entry->vfs_inode.i_state & 3820 WARN_ON(!(entry->vfs_inode.i_state &
@@ -4006,9 +3878,9 @@ again:
4006 prev = node; 3878 prev = node;
4007 entry = rb_entry(node, struct btrfs_inode, rb_node); 3879 entry = rb_entry(node, struct btrfs_inode, rb_node);
4008 3880
4009 if (objectid < entry->vfs_inode.i_ino) 3881 if (objectid < btrfs_ino(&entry->vfs_inode))
4010 node = node->rb_left; 3882 node = node->rb_left;
4011 else if (objectid > entry->vfs_inode.i_ino) 3883 else if (objectid > btrfs_ino(&entry->vfs_inode))
4012 node = node->rb_right; 3884 node = node->rb_right;
4013 else 3885 else
4014 break; 3886 break;
@@ -4016,7 +3888,7 @@ again:
4016 if (!node) { 3888 if (!node) {
4017 while (prev) { 3889 while (prev) {
4018 entry = rb_entry(prev, struct btrfs_inode, rb_node); 3890 entry = rb_entry(prev, struct btrfs_inode, rb_node);
4019 if (objectid <= entry->vfs_inode.i_ino) { 3891 if (objectid <= btrfs_ino(&entry->vfs_inode)) {
4020 node = prev; 3892 node = prev;
4021 break; 3893 break;
4022 } 3894 }
@@ -4025,7 +3897,7 @@ again:
4025 } 3897 }
4026 while (node) { 3898 while (node) {
4027 entry = rb_entry(node, struct btrfs_inode, rb_node); 3899 entry = rb_entry(node, struct btrfs_inode, rb_node);
4028 objectid = entry->vfs_inode.i_ino + 1; 3900 objectid = btrfs_ino(&entry->vfs_inode) + 1;
4029 inode = igrab(&entry->vfs_inode); 3901 inode = igrab(&entry->vfs_inode);
4030 if (inode) { 3902 if (inode) {
4031 spin_unlock(&root->inode_lock); 3903 spin_unlock(&root->inode_lock);
@@ -4063,7 +3935,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
4063static int btrfs_find_actor(struct inode *inode, void *opaque) 3935static int btrfs_find_actor(struct inode *inode, void *opaque)
4064{ 3936{
4065 struct btrfs_iget_args *args = opaque; 3937 struct btrfs_iget_args *args = opaque;
4066 return args->ino == inode->i_ino && 3938 return args->ino == btrfs_ino(inode) &&
4067 args->root == BTRFS_I(inode)->root; 3939 args->root == BTRFS_I(inode)->root;
4068} 3940}
4069 3941
@@ -4208,7 +4080,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
4208 return d_splice_alias(inode, dentry); 4080 return d_splice_alias(inode, dentry);
4209} 4081}
4210 4082
4211static unsigned char btrfs_filetype_table[] = { 4083unsigned char btrfs_filetype_table[] = {
4212 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK 4084 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
4213}; 4085};
4214 4086
@@ -4222,6 +4094,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4222 struct btrfs_key key; 4094 struct btrfs_key key;
4223 struct btrfs_key found_key; 4095 struct btrfs_key found_key;
4224 struct btrfs_path *path; 4096 struct btrfs_path *path;
4097 struct list_head ins_list;
4098 struct list_head del_list;
4225 int ret; 4099 int ret;
4226 struct extent_buffer *leaf; 4100 struct extent_buffer *leaf;
4227 int slot; 4101 int slot;
@@ -4234,6 +4108,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4234 char tmp_name[32]; 4108 char tmp_name[32];
4235 char *name_ptr; 4109 char *name_ptr;
4236 int name_len; 4110 int name_len;
4111 int is_curr = 0; /* filp->f_pos points to the current index? */
4237 4112
4238 /* FIXME, use a real flag for deciding about the key type */ 4113 /* FIXME, use a real flag for deciding about the key type */
4239 if (root->fs_info->tree_root == root) 4114 if (root->fs_info->tree_root == root)
@@ -4241,9 +4116,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4241 4116
4242 /* special case for "." */ 4117 /* special case for "." */
4243 if (filp->f_pos == 0) { 4118 if (filp->f_pos == 0) {
4244 over = filldir(dirent, ".", 1, 4119 over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
4245 1, inode->i_ino,
4246 DT_DIR);
4247 if (over) 4120 if (over)
4248 return 0; 4121 return 0;
4249 filp->f_pos = 1; 4122 filp->f_pos = 1;
@@ -4258,11 +4131,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4258 filp->f_pos = 2; 4131 filp->f_pos = 2;
4259 } 4132 }
4260 path = btrfs_alloc_path(); 4133 path = btrfs_alloc_path();
4134 if (!path)
4135 return -ENOMEM;
4261 path->reada = 2; 4136 path->reada = 2;
4262 4137
4138 if (key_type == BTRFS_DIR_INDEX_KEY) {
4139 INIT_LIST_HEAD(&ins_list);
4140 INIT_LIST_HEAD(&del_list);
4141 btrfs_get_delayed_items(inode, &ins_list, &del_list);
4142 }
4143
4263 btrfs_set_key_type(&key, key_type); 4144 btrfs_set_key_type(&key, key_type);
4264 key.offset = filp->f_pos; 4145 key.offset = filp->f_pos;
4265 key.objectid = inode->i_ino; 4146 key.objectid = btrfs_ino(inode);
4266 4147
4267 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4148 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4268 if (ret < 0) 4149 if (ret < 0)
@@ -4289,8 +4170,13 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4289 break; 4170 break;
4290 if (found_key.offset < filp->f_pos) 4171 if (found_key.offset < filp->f_pos)
4291 goto next; 4172 goto next;
4173 if (key_type == BTRFS_DIR_INDEX_KEY &&
4174 btrfs_should_delete_dir_index(&del_list,
4175 found_key.offset))
4176 goto next;
4292 4177
4293 filp->f_pos = found_key.offset; 4178 filp->f_pos = found_key.offset;
4179 is_curr = 1;
4294 4180
4295 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 4181 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
4296 di_cur = 0; 4182 di_cur = 0;
@@ -4345,6 +4231,15 @@ next:
4345 path->slots[0]++; 4231 path->slots[0]++;
4346 } 4232 }
4347 4233
4234 if (key_type == BTRFS_DIR_INDEX_KEY) {
4235 if (is_curr)
4236 filp->f_pos++;
4237 ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
4238 &ins_list);
4239 if (ret)
4240 goto nopos;
4241 }
4242
4348 /* Reached end of directory/root. Bump pos past the last item. */ 4243 /* Reached end of directory/root. Bump pos past the last item. */
4349 if (key_type == BTRFS_DIR_INDEX_KEY) 4244 if (key_type == BTRFS_DIR_INDEX_KEY)
4350 /* 4245 /*
@@ -4357,6 +4252,8 @@ next:
4357nopos: 4252nopos:
4358 ret = 0; 4253 ret = 0;
4359err: 4254err:
4255 if (key_type == BTRFS_DIR_INDEX_KEY)
4256 btrfs_put_delayed_items(&ins_list, &del_list);
4360 btrfs_free_path(path); 4257 btrfs_free_path(path);
4361 return ret; 4258 return ret;
4362} 4259}
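
The readdir changes above interleave three sources: on-disk BTRFS_DIR_INDEX_KEY items, delayed insertions that have not reached the btree yet (ins_list), and delayed deletions that still appear on disk (del_list). The sketch below models that merge over plain sorted arrays; it illustrates the ordering logic only and uses none of the kernel data structures.

#include <stdbool.h>
#include <stdio.h>

/* Models the role of btrfs_should_delete_dir_index(): is this on-disk
 * index queued for deletion? */
static bool pending_delete(const unsigned long long *del, int nr,
			   unsigned long long index)
{
	int i;

	for (i = 0; i < nr; i++)
		if (del[i] == index)
			return true;
	return false;
}

int main(void)
{
	/* dir index numbers as found in the btree, in key order */
	unsigned long long on_disk[] = { 2, 3, 5, 8 };
	/* delayed insertions: created but not yet written to the btree */
	unsigned long long ins_list[] = { 9, 10 };
	/* delayed deletions: unlinked but still present in the btree */
	unsigned long long del_list[] = { 3 };
	int i;

	/* walk the tree first, skipping entries queued for deletion */
	for (i = 0; i < 4; i++) {
		if (pending_delete(del_list, 1, on_disk[i]))
			continue;
		printf("emit index %llu (on disk)\n", on_disk[i]);
	}
	/* then emit the pending insertions; their indexes are the newest,
	 * so they sort after everything already on disk */
	for (i = 0; i < 2; i++)
		printf("emit index %llu (delayed)\n", ins_list[i]);
	return 0;
}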
@@ -4372,7 +4269,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4372 return 0; 4269 return 0;
4373 4270
4374 smp_mb(); 4271 smp_mb();
4375 nolock = (root->fs_info->closing && root == root->fs_info->tree_root); 4272 if (root->fs_info->closing && is_free_space_inode(root, inode))
4273 nolock = true;
4376 4274
4377 if (wbc->sync_mode == WB_SYNC_ALL) { 4275 if (wbc->sync_mode == WB_SYNC_ALL) {
4378 if (nolock) 4276 if (nolock)
@@ -4415,25 +4313,25 @@ void btrfs_dirty_inode(struct inode *inode)
4415 btrfs_end_transaction(trans, root); 4313 btrfs_end_transaction(trans, root);
4416 trans = btrfs_start_transaction(root, 1); 4314 trans = btrfs_start_transaction(root, 1);
4417 if (IS_ERR(trans)) { 4315 if (IS_ERR(trans)) {
4418 if (printk_ratelimit()) { 4316 printk_ratelimited(KERN_ERR "btrfs: fail to "
4419 printk(KERN_ERR "btrfs: fail to " 4317 "dirty inode %llu error %ld\n",
4420 "dirty inode %lu error %ld\n", 4318 (unsigned long long)btrfs_ino(inode),
4421 inode->i_ino, PTR_ERR(trans)); 4319 PTR_ERR(trans));
4422 }
4423 return; 4320 return;
4424 } 4321 }
4425 btrfs_set_trans_block_group(trans, inode); 4322 btrfs_set_trans_block_group(trans, inode);
4426 4323
4427 ret = btrfs_update_inode(trans, root, inode); 4324 ret = btrfs_update_inode(trans, root, inode);
4428 if (ret) { 4325 if (ret) {
4429 if (printk_ratelimit()) { 4326 printk_ratelimited(KERN_ERR "btrfs: fail to "
4430 printk(KERN_ERR "btrfs: fail to " 4327 "dirty inode %llu error %d\n",
4431 "dirty inode %lu error %d\n", 4328 (unsigned long long)btrfs_ino(inode),
4432 inode->i_ino, ret); 4329 ret);
4433 }
4434 } 4330 }
4435 } 4331 }
4436 btrfs_end_transaction(trans, root); 4332 btrfs_end_transaction(trans, root);
4333 if (BTRFS_I(inode)->delayed_node)
4334 btrfs_balance_delayed_items(root);
4437} 4335}
4438 4336
4439/* 4337/*
@@ -4449,7 +4347,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
4449 struct extent_buffer *leaf; 4347 struct extent_buffer *leaf;
4450 int ret; 4348 int ret;
4451 4349
4452 key.objectid = inode->i_ino; 4350 key.objectid = btrfs_ino(inode);
4453 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); 4351 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4454 key.offset = (u64)-1; 4352 key.offset = (u64)-1;
4455 4353
@@ -4481,7 +4379,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
4481 leaf = path->nodes[0]; 4379 leaf = path->nodes[0];
4482 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4380 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4483 4381
4484 if (found_key.objectid != inode->i_ino || 4382 if (found_key.objectid != btrfs_ino(inode) ||
4485 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { 4383 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4486 BTRFS_I(inode)->index_cnt = 2; 4384 BTRFS_I(inode)->index_cnt = 2;
4487 goto out; 4385 goto out;
@@ -4502,9 +4400,12 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index)
4502 int ret = 0; 4400 int ret = 0;
4503 4401
4504 if (BTRFS_I(dir)->index_cnt == (u64)-1) { 4402 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4505 ret = btrfs_set_inode_index_count(dir); 4403 ret = btrfs_inode_delayed_dir_index_count(dir);
4506 if (ret) 4404 if (ret) {
4507 return ret; 4405 ret = btrfs_set_inode_index_count(dir);
4406 if (ret)
4407 return ret;
4408 }
4508 } 4409 }
4509 4410
4510 *index = BTRFS_I(dir)->index_cnt; 4411 *index = BTRFS_I(dir)->index_cnt;
@@ -4540,6 +4441,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4540 return ERR_PTR(-ENOMEM); 4441 return ERR_PTR(-ENOMEM);
4541 } 4442 }
4542 4443
4444 /*
4445 * we have to initialize this early, so we can reclaim the inode
4446 * number if we fail afterwards in this function.
4447 */
4448 inode->i_ino = objectid;
4449
4543 if (dir) { 4450 if (dir) {
4544 trace_btrfs_inode_request(dir); 4451 trace_btrfs_inode_request(dir);
4545 4452
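
The hunk above moves the inode->i_ino assignment to the top of btrfs_new_inode() so that, if a later step fails, the eviction path shown earlier can hand the reserved objectid back through btrfs_return_ino(). Reduced to a hypothetical userspace shape (every name here is invented), the pattern is: allocate the number first, and make sure every failure path returns it.

#include <stdio.h>

/* Invented stand-ins for the cache calls and for a failing setup step. */
static int find_free_objectid(unsigned long long *objectid)
{
	static unsigned long long next = 256;

	*objectid = next++;
	return 0;
}

static void return_objectid(unsigned long long objectid)
{
	printf("objectid %llu goes back to the cache\n", objectid);
}

static int insert_inode_item(unsigned long long objectid)
{
	(void)objectid;
	return -1;			/* pretend the btree insert failed */
}

static int new_inode(void)
{
	unsigned long long objectid;
	int ret;

	ret = find_free_objectid(&objectid);
	if (ret)
		return ret;

	/* the objectid is recorded before anything below can fail ... */
	ret = insert_inode_item(objectid);
	if (ret) {
		/* ... so the error path can reclaim it */
		return_objectid(objectid);
		return ret;
	}
	return 0;
}

int main(void)
{
	return new_inode() ? 1 : 0;
}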
@@ -4585,7 +4492,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4585 goto fail; 4492 goto fail;
4586 4493
4587 inode_init_owner(inode, dir, mode); 4494 inode_init_owner(inode, dir, mode);
4588 inode->i_ino = objectid;
4589 inode_set_bytes(inode, 0); 4495 inode_set_bytes(inode, 0);
4590 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 4496 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4591 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4497 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -4649,29 +4555,29 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
4649 int ret = 0; 4555 int ret = 0;
4650 struct btrfs_key key; 4556 struct btrfs_key key;
4651 struct btrfs_root *root = BTRFS_I(parent_inode)->root; 4557 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4558 u64 ino = btrfs_ino(inode);
4559 u64 parent_ino = btrfs_ino(parent_inode);
4652 4560
4653 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { 4561 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4654 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); 4562 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4655 } else { 4563 } else {
4656 key.objectid = inode->i_ino; 4564 key.objectid = ino;
4657 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); 4565 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4658 key.offset = 0; 4566 key.offset = 0;
4659 } 4567 }
4660 4568
4661 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { 4569 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4662 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, 4570 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4663 key.objectid, root->root_key.objectid, 4571 key.objectid, root->root_key.objectid,
4664 parent_inode->i_ino, 4572 parent_ino, index, name, name_len);
4665 index, name, name_len);
4666 } else if (add_backref) { 4573 } else if (add_backref) {
4667 ret = btrfs_insert_inode_ref(trans, root, 4574 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
4668 name, name_len, inode->i_ino, 4575 parent_ino, index);
4669 parent_inode->i_ino, index);
4670 } 4576 }
4671 4577
4672 if (ret == 0) { 4578 if (ret == 0) {
4673 ret = btrfs_insert_dir_item(trans, root, name, name_len, 4579 ret = btrfs_insert_dir_item(trans, root, name, name_len,
4674 parent_inode->i_ino, &key, 4580 parent_inode, &key,
4675 btrfs_inode_type(inode), index); 4581 btrfs_inode_type(inode), index);
4676 BUG_ON(ret); 4582 BUG_ON(ret);
4677 4583
@@ -4714,10 +4620,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4714 if (!new_valid_dev(rdev)) 4620 if (!new_valid_dev(rdev))
4715 return -EINVAL; 4621 return -EINVAL;
4716 4622
4717 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4718 if (err)
4719 return err;
4720
4721 /* 4623 /*
4722 * 2 for inode item and ref 4624 * 2 for inode item and ref
4723 * 2 for dir items 4625 * 2 for dir items
@@ -4729,8 +4631,12 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4729 4631
4730 btrfs_set_trans_block_group(trans, dir); 4632 btrfs_set_trans_block_group(trans, dir);
4731 4633
4634 err = btrfs_find_free_ino(root, &objectid);
4635 if (err)
4636 goto out_unlock;
4637
4732 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4638 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4733 dentry->d_name.len, dir->i_ino, objectid, 4639 dentry->d_name.len, btrfs_ino(dir), objectid,
4734 BTRFS_I(dir)->block_group, mode, &index); 4640 BTRFS_I(dir)->block_group, mode, &index);
4735 if (IS_ERR(inode)) { 4641 if (IS_ERR(inode)) {
4736 err = PTR_ERR(inode); 4642 err = PTR_ERR(inode);
@@ -4777,9 +4683,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4777 u64 objectid; 4683 u64 objectid;
4778 u64 index = 0; 4684 u64 index = 0;
4779 4685
4780 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4781 if (err)
4782 return err;
4783 /* 4686 /*
4784 * 2 for inode item and ref 4687 * 2 for inode item and ref
4785 * 2 for dir items 4688 * 2 for dir items
@@ -4791,8 +4694,12 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4791 4694
4792 btrfs_set_trans_block_group(trans, dir); 4695 btrfs_set_trans_block_group(trans, dir);
4793 4696
4697 err = btrfs_find_free_ino(root, &objectid);
4698 if (err)
4699 goto out_unlock;
4700
4794 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4701 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4795 dentry->d_name.len, dir->i_ino, objectid, 4702 dentry->d_name.len, btrfs_ino(dir), objectid,
4796 BTRFS_I(dir)->block_group, mode, &index); 4703 BTRFS_I(dir)->block_group, mode, &index);
4797 if (IS_ERR(inode)) { 4704 if (IS_ERR(inode)) {
4798 err = PTR_ERR(inode); 4705 err = PTR_ERR(inode);
@@ -4903,10 +4810,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4903 u64 index = 0; 4810 u64 index = 0;
4904 unsigned long nr = 1; 4811 unsigned long nr = 1;
4905 4812
4906 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4907 if (err)
4908 return err;
4909
4910 /* 4813 /*
4911 * 2 items for inode and ref 4814 * 2 items for inode and ref
4912 * 2 items for dir items 4815 * 2 items for dir items
@@ -4917,8 +4820,12 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4917 return PTR_ERR(trans); 4820 return PTR_ERR(trans);
4918 btrfs_set_trans_block_group(trans, dir); 4821 btrfs_set_trans_block_group(trans, dir);
4919 4822
4823 err = btrfs_find_free_ino(root, &objectid);
4824 if (err)
4825 goto out_fail;
4826
4920 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4827 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4921 dentry->d_name.len, dir->i_ino, objectid, 4828 dentry->d_name.len, btrfs_ino(dir), objectid,
4922 BTRFS_I(dir)->block_group, S_IFDIR | mode, 4829 BTRFS_I(dir)->block_group, S_IFDIR | mode,
4923 &index); 4830 &index);
4924 if (IS_ERR(inode)) { 4831 if (IS_ERR(inode)) {
@@ -5041,7 +4948,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
5041 u64 bytenr; 4948 u64 bytenr;
5042 u64 extent_start = 0; 4949 u64 extent_start = 0;
5043 u64 extent_end = 0; 4950 u64 extent_end = 0;
5044 u64 objectid = inode->i_ino; 4951 u64 objectid = btrfs_ino(inode);
5045 u32 found_type; 4952 u32 found_type;
5046 struct btrfs_path *path = NULL; 4953 struct btrfs_path *path = NULL;
5047 struct btrfs_root *root = BTRFS_I(inode)->root; 4954 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5069,7 +4976,7 @@ again:
5069 else 4976 else
5070 goto out; 4977 goto out;
5071 } 4978 }
5072 em = alloc_extent_map(GFP_NOFS); 4979 em = alloc_extent_map();
5073 if (!em) { 4980 if (!em) {
5074 err = -ENOMEM; 4981 err = -ENOMEM;
5075 goto out; 4982 goto out;
@@ -5223,7 +5130,7 @@ again:
5223 kunmap(page); 5130 kunmap(page);
5224 free_extent_map(em); 5131 free_extent_map(em);
5225 em = NULL; 5132 em = NULL;
5226 btrfs_release_path(root, path); 5133 btrfs_release_path(path);
5227 trans = btrfs_join_transaction(root, 1); 5134 trans = btrfs_join_transaction(root, 1);
5228 if (IS_ERR(trans)) 5135 if (IS_ERR(trans))
5229 return ERR_CAST(trans); 5136 return ERR_CAST(trans);
@@ -5249,7 +5156,7 @@ not_found_em:
5249 em->block_start = EXTENT_MAP_HOLE; 5156 em->block_start = EXTENT_MAP_HOLE;
5250 set_bit(EXTENT_FLAG_VACANCY, &em->flags); 5157 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
5251insert: 5158insert:
5252 btrfs_release_path(root, path); 5159 btrfs_release_path(path);
5253 if (em->start > start || extent_map_end(em) <= start) { 5160 if (em->start > start || extent_map_end(em) <= start) {
5254 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " 5161 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
5255 "[%llu %llu]\n", (unsigned long long)em->start, 5162 "[%llu %llu]\n", (unsigned long long)em->start,
@@ -5382,7 +5289,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
5382 u64 hole_start = start; 5289 u64 hole_start = start;
5383 u64 hole_len = len; 5290 u64 hole_len = len;
5384 5291
5385 em = alloc_extent_map(GFP_NOFS); 5292 em = alloc_extent_map();
5386 if (!em) { 5293 if (!em) {
5387 err = -ENOMEM; 5294 err = -ENOMEM;
5388 goto out; 5295 goto out;
@@ -5472,6 +5379,9 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5472 if (IS_ERR(trans)) 5379 if (IS_ERR(trans))
5473 return ERR_CAST(trans); 5380 return ERR_CAST(trans);
5474 5381
5382 if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
5383 btrfs_add_inode_defrag(trans, inode);
5384
5475 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 5385 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5476 5386
5477 alloc_hint = get_extent_allocation_hint(inode, start, len); 5387 alloc_hint = get_extent_allocation_hint(inode, start, len);
@@ -5483,7 +5393,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5483 } 5393 }
5484 5394
5485 if (!em) { 5395 if (!em) {
5486 em = alloc_extent_map(GFP_NOFS); 5396 em = alloc_extent_map();
5487 if (!em) { 5397 if (!em) {
5488 em = ERR_PTR(-ENOMEM); 5398 em = ERR_PTR(-ENOMEM);
5489 goto out; 5399 goto out;
@@ -5549,7 +5459,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5549 if (!path) 5459 if (!path)
5550 return -ENOMEM; 5460 return -ENOMEM;
5551 5461
5552 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, 5462 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
5553 offset, 0); 5463 offset, 0);
5554 if (ret < 0) 5464 if (ret < 0)
5555 goto out; 5465 goto out;
@@ -5566,7 +5476,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5566 ret = 0; 5476 ret = 0;
5567 leaf = path->nodes[0]; 5477 leaf = path->nodes[0];
5568 btrfs_item_key_to_cpu(leaf, &key, slot); 5478 btrfs_item_key_to_cpu(leaf, &key, slot);
5569 if (key.objectid != inode->i_ino || 5479 if (key.objectid != btrfs_ino(inode) ||
5570 key.type != BTRFS_EXTENT_DATA_KEY) { 5480 key.type != BTRFS_EXTENT_DATA_KEY) {
5571 /* not our file or wrong item type, must cow */ 5481 /* not our file or wrong item type, must cow */
5572 goto out; 5482 goto out;
@@ -5600,7 +5510,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5600 * look for other files referencing this extent, if we 5510 * look for other files referencing this extent, if we
5601 * find any we must cow 5511 * find any we must cow
5602 */ 5512 */
5603 if (btrfs_cross_ref_exist(trans, root, inode->i_ino, 5513 if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
5604 key.offset - backref_offset, disk_bytenr)) 5514 key.offset - backref_offset, disk_bytenr))
5605 goto out; 5515 goto out;
5606 5516
@@ -5790,9 +5700,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5790 5700
5791 flush_dcache_page(bvec->bv_page); 5701 flush_dcache_page(bvec->bv_page);
5792 if (csum != *private) { 5702 if (csum != *private) {
5793 printk(KERN_ERR "btrfs csum failed ino %lu off" 5703 printk(KERN_ERR "btrfs csum failed ino %llu off"
5794 " %llu csum %u private %u\n", 5704 " %llu csum %u private %u\n",
5795 inode->i_ino, (unsigned long long)start, 5705 (unsigned long long)btrfs_ino(inode),
5706 (unsigned long long)start,
5796 csum, *private); 5707 csum, *private);
5797 err = -EIO; 5708 err = -EIO;
5798 } 5709 }
@@ -5939,9 +5850,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
5939 struct btrfs_dio_private *dip = bio->bi_private; 5850 struct btrfs_dio_private *dip = bio->bi_private;
5940 5851
5941 if (err) { 5852 if (err) {
5942 printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu " 5853 printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
5943 "sector %#Lx len %u err no %d\n", 5854 "sector %#Lx len %u err no %d\n",
5944 dip->inode->i_ino, bio->bi_rw, 5855 (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
5945 (unsigned long long)bio->bi_sector, bio->bi_size, err); 5856 (unsigned long long)bio->bi_sector, bio->bi_size, err);
5946 dip->errors = 1; 5857 dip->errors = 1;
5947 5858
@@ -6782,12 +6693,15 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6782 ei->ordered_data_close = 0; 6693 ei->ordered_data_close = 0;
6783 ei->orphan_meta_reserved = 0; 6694 ei->orphan_meta_reserved = 0;
6784 ei->dummy_inode = 0; 6695 ei->dummy_inode = 0;
6696 ei->in_defrag = 0;
6785 ei->force_compress = BTRFS_COMPRESS_NONE; 6697 ei->force_compress = BTRFS_COMPRESS_NONE;
6786 6698
6699 ei->delayed_node = NULL;
6700
6787 inode = &ei->vfs_inode; 6701 inode = &ei->vfs_inode;
6788 extent_map_tree_init(&ei->extent_tree, GFP_NOFS); 6702 extent_map_tree_init(&ei->extent_tree);
6789 extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS); 6703 extent_io_tree_init(&ei->io_tree, &inode->i_data);
6790 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS); 6704 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
6791 mutex_init(&ei->log_mutex); 6705 mutex_init(&ei->log_mutex);
6792 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 6706 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
6793 INIT_LIST_HEAD(&ei->i_orphan); 6707 INIT_LIST_HEAD(&ei->i_orphan);
@@ -6851,8 +6765,8 @@ void btrfs_destroy_inode(struct inode *inode)
6851 6765
6852 spin_lock(&root->orphan_lock); 6766 spin_lock(&root->orphan_lock);
6853 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 6767 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
6854 printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", 6768 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
6855 inode->i_ino); 6769 (unsigned long long)btrfs_ino(inode));
6856 list_del_init(&BTRFS_I(inode)->i_orphan); 6770 list_del_init(&BTRFS_I(inode)->i_orphan);
6857 } 6771 }
6858 spin_unlock(&root->orphan_lock); 6772 spin_unlock(&root->orphan_lock);
@@ -6874,6 +6788,7 @@ void btrfs_destroy_inode(struct inode *inode)
6874 inode_tree_del(inode); 6788 inode_tree_del(inode);
6875 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 6789 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
6876free: 6790free:
6791 btrfs_remove_delayed_node(inode);
6877 call_rcu(&inode->i_rcu, btrfs_i_callback); 6792 call_rcu(&inode->i_rcu, btrfs_i_callback);
6878} 6793}
6879 6794
@@ -6882,7 +6797,7 @@ int btrfs_drop_inode(struct inode *inode)
6882 struct btrfs_root *root = BTRFS_I(inode)->root; 6797 struct btrfs_root *root = BTRFS_I(inode)->root;
6883 6798
6884 if (btrfs_root_refs(&root->root_item) == 0 && 6799 if (btrfs_root_refs(&root->root_item) == 0 &&
6885 root != root->fs_info->tree_root) 6800 !is_free_space_inode(root, inode))
6886 return 1; 6801 return 1;
6887 else 6802 else
6888 return generic_drop_inode(inode); 6803 return generic_drop_inode(inode);
@@ -6991,16 +6906,17 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6991 u64 index = 0; 6906 u64 index = 0;
6992 u64 root_objectid; 6907 u64 root_objectid;
6993 int ret; 6908 int ret;
6909 u64 old_ino = btrfs_ino(old_inode);
6994 6910
6995 if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 6911 if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
6996 return -EPERM; 6912 return -EPERM;
6997 6913
6998 /* we only allow rename subvolume link between subvolumes */ 6914 /* we only allow rename subvolume link between subvolumes */
6999 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 6915 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
7000 return -EXDEV; 6916 return -EXDEV;
7001 6917
7002 if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 6918 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
7003 (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) 6919 (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
7004 return -ENOTEMPTY; 6920 return -ENOTEMPTY;
7005 6921
7006 if (S_ISDIR(old_inode->i_mode) && new_inode && 6922 if (S_ISDIR(old_inode->i_mode) && new_inode &&
@@ -7016,7 +6932,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7016 filemap_flush(old_inode->i_mapping); 6932 filemap_flush(old_inode->i_mapping);
7017 6933
7018 /* close the racy window with snapshot create/destroy ioctl */ 6934 /* close the racy window with snapshot create/destroy ioctl */
7019 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 6935 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7020 down_read(&root->fs_info->subvol_sem); 6936 down_read(&root->fs_info->subvol_sem);
7021 /* 6937 /*
7022 * We want to reserve the absolute worst case amount of items. So if 6938 * We want to reserve the absolute worst case amount of items. So if
@@ -7041,15 +6957,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7041 if (ret) 6957 if (ret)
7042 goto out_fail; 6958 goto out_fail;
7043 6959
7044 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { 6960 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7045 /* force full log commit if subvolume involved. */ 6961 /* force full log commit if subvolume involved. */
7046 root->fs_info->last_trans_log_full_commit = trans->transid; 6962 root->fs_info->last_trans_log_full_commit = trans->transid;
7047 } else { 6963 } else {
7048 ret = btrfs_insert_inode_ref(trans, dest, 6964 ret = btrfs_insert_inode_ref(trans, dest,
7049 new_dentry->d_name.name, 6965 new_dentry->d_name.name,
7050 new_dentry->d_name.len, 6966 new_dentry->d_name.len,
7051 old_inode->i_ino, 6967 old_ino,
7052 new_dir->i_ino, index); 6968 btrfs_ino(new_dir), index);
7053 if (ret) 6969 if (ret)
7054 goto out_fail; 6970 goto out_fail;
7055 /* 6971 /*
@@ -7065,10 +6981,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7065 * make sure the inode gets flushed if it is replacing 6981 * make sure the inode gets flushed if it is replacing
7066 * something. 6982 * something.
7067 */ 6983 */
7068 if (new_inode && new_inode->i_size && 6984 if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
7069 old_inode && S_ISREG(old_inode->i_mode)) {
7070 btrfs_add_ordered_operation(trans, root, old_inode); 6985 btrfs_add_ordered_operation(trans, root, old_inode);
7071 }
7072 6986
7073 old_dir->i_ctime = old_dir->i_mtime = ctime; 6987 old_dir->i_ctime = old_dir->i_mtime = ctime;
7074 new_dir->i_ctime = new_dir->i_mtime = ctime; 6988 new_dir->i_ctime = new_dir->i_mtime = ctime;
@@ -7077,7 +6991,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7077 if (old_dentry->d_parent != new_dentry->d_parent) 6991 if (old_dentry->d_parent != new_dentry->d_parent)
7078 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); 6992 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
7079 6993
7080 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { 6994 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7081 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; 6995 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
7082 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, 6996 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
7083 old_dentry->d_name.name, 6997 old_dentry->d_name.name,
@@ -7094,7 +7008,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7094 7008
7095 if (new_inode) { 7009 if (new_inode) {
7096 new_inode->i_ctime = CURRENT_TIME; 7010 new_inode->i_ctime = CURRENT_TIME;
7097 if (unlikely(new_inode->i_ino == 7011 if (unlikely(btrfs_ino(new_inode) ==
7098 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 7012 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
7099 root_objectid = BTRFS_I(new_inode)->location.objectid; 7013 root_objectid = BTRFS_I(new_inode)->location.objectid;
7100 ret = btrfs_unlink_subvol(trans, dest, new_dir, 7014 ret = btrfs_unlink_subvol(trans, dest, new_dir,
@@ -7122,7 +7036,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7122 new_dentry->d_name.len, 0, index); 7036 new_dentry->d_name.len, 0, index);
7123 BUG_ON(ret); 7037 BUG_ON(ret);
7124 7038
7125 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { 7039 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
7126 struct dentry *parent = dget_parent(new_dentry); 7040 struct dentry *parent = dget_parent(new_dentry);
7127 btrfs_log_new_name(trans, old_inode, old_dir, parent); 7041 btrfs_log_new_name(trans, old_inode, old_dir, parent);
7128 dput(parent); 7042 dput(parent);
@@ -7131,7 +7045,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7131out_fail: 7045out_fail:
7132 btrfs_end_transaction_throttle(trans, root); 7046 btrfs_end_transaction_throttle(trans, root);
7133out_notrans: 7047out_notrans:
7134 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 7048 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7135 up_read(&root->fs_info->subvol_sem); 7049 up_read(&root->fs_info->subvol_sem);
7136 7050
7137 return ret; 7051 return ret;
@@ -7185,58 +7099,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
7185 return 0; 7099 return 0;
7186} 7100}
7187 7101
7188int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
7189 int sync)
7190{
7191 struct btrfs_inode *binode;
7192 struct inode *inode = NULL;
7193
7194 spin_lock(&root->fs_info->delalloc_lock);
7195 while (!list_empty(&root->fs_info->delalloc_inodes)) {
7196 binode = list_entry(root->fs_info->delalloc_inodes.next,
7197 struct btrfs_inode, delalloc_inodes);
7198 inode = igrab(&binode->vfs_inode);
7199 if (inode) {
7200 list_move_tail(&binode->delalloc_inodes,
7201 &root->fs_info->delalloc_inodes);
7202 break;
7203 }
7204
7205 list_del_init(&binode->delalloc_inodes);
7206 cond_resched_lock(&root->fs_info->delalloc_lock);
7207 }
7208 spin_unlock(&root->fs_info->delalloc_lock);
7209
7210 if (inode) {
7211 if (sync) {
7212 filemap_write_and_wait(inode->i_mapping);
7213 /*
7214 * We have to do this because compression doesn't
7215 * actually set PG_writeback until it submits the pages
7216 * for IO, which happens in an async thread, so we could
7217 * race and not actually wait for any writeback pages
7218 * because they've not been submitted yet. Technically
7219 * this could still be the case for the ordered stuff
7220 * since the async thread may not have started to do its
7221 * work yet. If this becomes the case then we need to
7222 * figure out a way to make sure that in writepage we
7223 * wait for any async pages to be submitted before
7224 * returning so that fdatawait does what its supposed to
7225 * do.
7226 */
7227 btrfs_wait_ordered_range(inode, 0, (u64)-1);
7228 } else {
7229 filemap_flush(inode->i_mapping);
7230 }
7231 if (delay_iput)
7232 btrfs_add_delayed_iput(inode);
7233 else
7234 iput(inode);
7235 return 1;
7236 }
7237 return 0;
7238}
7239
7240static int btrfs_symlink(struct inode *dir, struct dentry *dentry, 7102static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7241 const char *symname) 7103 const char *symname)
7242{ 7104{
@@ -7260,9 +7122,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7260 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 7122 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
7261 return -ENAMETOOLONG; 7123 return -ENAMETOOLONG;
7262 7124
7263 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
7264 if (err)
7265 return err;
7266 /* 7125 /*
7267 * 2 items for inode item and ref 7126 * 2 items for inode item and ref
7268 * 2 items for dir items 7127 * 2 items for dir items
@@ -7274,8 +7133,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7274 7133
7275 btrfs_set_trans_block_group(trans, dir); 7134 btrfs_set_trans_block_group(trans, dir);
7276 7135
7136 err = btrfs_find_free_ino(root, &objectid);
7137 if (err)
7138 goto out_unlock;
7139
7277 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 7140 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7278 dentry->d_name.len, dir->i_ino, objectid, 7141 dentry->d_name.len, btrfs_ino(dir), objectid,
7279 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, 7142 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
7280 &index); 7143 &index);
7281 if (IS_ERR(inode)) { 7144 if (IS_ERR(inode)) {
@@ -7307,7 +7170,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7307 7170
7308 path = btrfs_alloc_path(); 7171 path = btrfs_alloc_path();
7309 BUG_ON(!path); 7172 BUG_ON(!path);
7310 key.objectid = inode->i_ino; 7173 key.objectid = btrfs_ino(inode);
7311 key.offset = 0; 7174 key.offset = 0;
7312 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); 7175 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
7313 datasize = btrfs_file_extent_calc_inline_size(name_len); 7176 datasize = btrfs_file_extent_calc_inline_size(name_len);
@@ -7315,6 +7178,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7315 datasize); 7178 datasize);
7316 if (err) { 7179 if (err) {
7317 drop_inode = 1; 7180 drop_inode = 1;
7181 btrfs_free_path(path);
7318 goto out_unlock; 7182 goto out_unlock;
7319 } 7183 }
7320 leaf = path->nodes[0]; 7184 leaf = path->nodes[0];
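The inode.c hunks above consistently replace inode->i_ino with btrfs_ino(inode) and widen the matching printk formats from %lu to %llu, largely because btrfs object ids are 64-bit while i_ino is only an unsigned long. As a rough, hedged sketch (paraphrased, not the literal source of this series), the helper boils down to preferring the cached 64-bit objectid and falling back to the VFS inode number:

    /* sketch only: prefer the 64-bit objectid stored in the btrfs inode,
     * fall back to the VFS inode number when the objectid is not usable */
    static inline u64 btrfs_ino(struct inode *inode)
    {
            u64 ino = BTRFS_I(inode)->location.objectid;

            if (ino <= BTRFS_FIRST_FREE_OBJECTID)
                    ino = inode->i_ino;
            return ino;
    }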
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2616f7ed4799..85e818ce00c5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -50,6 +50,7 @@
50#include "print-tree.h" 50#include "print-tree.h"
51#include "volumes.h" 51#include "volumes.h"
52#include "locking.h" 52#include "locking.h"
53#include "inode-map.h"
53 54
54/* Mask out flags that are inappropriate for the given type of inode. */ 55/* Mask out flags that are inappropriate for the given type of inode. */
55static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) 56static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -281,8 +282,9 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
281 if (!capable(CAP_SYS_ADMIN)) 282 if (!capable(CAP_SYS_ADMIN))
282 return -EPERM; 283 return -EPERM;
283 284
284 mutex_lock(&fs_info->fs_devices->device_list_mutex); 285 rcu_read_lock();
285 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { 286 list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
287 dev_list) {
286 if (!device->bdev) 288 if (!device->bdev)
287 continue; 289 continue;
288 q = bdev_get_queue(device->bdev); 290 q = bdev_get_queue(device->bdev);
@@ -292,7 +294,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
292 minlen); 294 minlen);
293 } 295 }
294 } 296 }
295 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 297 rcu_read_unlock();
296 if (!num_devices) 298 if (!num_devices)
297 return -EOPNOTSUPP; 299 return -EOPNOTSUPP;
298 300
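The hunk above only changes how btrfs_ioctl_fitrim() walks the device list (an RCU read-side walk instead of taking device_list_mutex); the user-visible interface is the generic FITRIM ioctl. For context, a minimal caller looks roughly like this (standard Linux API, not part of this commit; error handling kept short):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>           /* FITRIM, struct fstrim_range */

    int main(int argc, char **argv)
    {
            struct fstrim_range range;
            int fd;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);   /* any file/dir on the filesystem */
            if (fd < 0)
                    return 1;

            memset(&range, 0, sizeof(range));
            range.len = (__u64)-1;          /* trim the whole filesystem */
            range.minlen = 0;               /* let the fs pick a minimum extent */

            if (ioctl(fd, FITRIM, &range) < 0) {
                    perror("FITRIM");
                    close(fd);
                    return 1;
            }
            printf("trimmed %llu bytes\n", (unsigned long long)range.len);
            close(fd);
            return 0;
    }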
@@ -329,8 +331,7 @@ static noinline int create_subvol(struct btrfs_root *root,
329 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; 331 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
330 u64 index = 0; 332 u64 index = 0;
331 333
332 ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, 334 ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
333 0, &objectid);
334 if (ret) { 335 if (ret) {
335 dput(parent); 336 dput(parent);
336 return ret; 337 return ret;
@@ -422,7 +423,7 @@ static noinline int create_subvol(struct btrfs_root *root,
422 BUG_ON(ret); 423 BUG_ON(ret);
423 424
424 ret = btrfs_insert_dir_item(trans, root, 425 ret = btrfs_insert_dir_item(trans, root,
425 name, namelen, dir->i_ino, &key, 426 name, namelen, dir, &key,
426 BTRFS_FT_DIR, index); 427 BTRFS_FT_DIR, index);
427 if (ret) 428 if (ret)
428 goto fail; 429 goto fail;
@@ -433,7 +434,7 @@ static noinline int create_subvol(struct btrfs_root *root,
433 434
434 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, 435 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
435 objectid, root->root_key.objectid, 436 objectid, root->root_key.objectid,
436 dir->i_ino, index, name, namelen); 437 btrfs_ino(dir), index, name, namelen);
437 438
438 BUG_ON(ret); 439 BUG_ON(ret);
439 440
@@ -655,6 +656,106 @@ out_unlock:
655 return error; 656 return error;
656} 657}
657 658
659/*
660 * When we're defragging a range, we don't want to kick it off again
661 * if it is really just waiting for delalloc to send it down.
662 * If we find a nice big extent or delalloc range for the bytes in the
663 * file you want to defrag, we return 0 to let you know to skip this
664 * part of the file
665 */
666static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh)
667{
668 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
669 struct extent_map *em = NULL;
670 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
671 u64 end;
672
673 read_lock(&em_tree->lock);
674 em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
675 read_unlock(&em_tree->lock);
676
677 if (em) {
678 end = extent_map_end(em);
679 free_extent_map(em);
680 if (end - offset > thresh)
681 return 0;
682 }
683 /* if we already have a nice delalloc here, just stop */
684 thresh /= 2;
685 end = count_range_bits(io_tree, &offset, offset + thresh,
686 thresh, EXTENT_DELALLOC, 1);
687 if (end >= thresh)
688 return 0;
689 return 1;
690}
691
692/*
693 * helper function to walk through a file and find extents
694 * newer than a specific transid, and smaller than thresh.
695 *
696 * This is used by the defragging code to find new and small
697 * extents
698 */
699static int find_new_extents(struct btrfs_root *root,
700 struct inode *inode, u64 newer_than,
701 u64 *off, int thresh)
702{
703 struct btrfs_path *path;
704 struct btrfs_key min_key;
705 struct btrfs_key max_key;
706 struct extent_buffer *leaf;
707 struct btrfs_file_extent_item *extent;
708 int type;
709 int ret;
710
711 path = btrfs_alloc_path();
712 if (!path)
713 return -ENOMEM;
714
715 min_key.objectid = inode->i_ino;
716 min_key.type = BTRFS_EXTENT_DATA_KEY;
717 min_key.offset = *off;
718
719 max_key.objectid = inode->i_ino;
720 max_key.type = (u8)-1;
721 max_key.offset = (u64)-1;
722
723 path->keep_locks = 1;
724
725 while(1) {
726 ret = btrfs_search_forward(root, &min_key, &max_key,
727 path, 0, newer_than);
728 if (ret != 0)
729 goto none;
730 if (min_key.objectid != inode->i_ino)
731 goto none;
732 if (min_key.type != BTRFS_EXTENT_DATA_KEY)
733 goto none;
734
735 leaf = path->nodes[0];
736 extent = btrfs_item_ptr(leaf, path->slots[0],
737 struct btrfs_file_extent_item);
738
739 type = btrfs_file_extent_type(leaf, extent);
740 if (type == BTRFS_FILE_EXTENT_REG &&
741 btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
742 check_defrag_in_cache(inode, min_key.offset, thresh)) {
743 *off = min_key.offset;
744 btrfs_free_path(path);
745 return 0;
746 }
747
748 if (min_key.offset == (u64)-1)
749 goto none;
750
751 min_key.offset++;
752 btrfs_release_path(path);
753 }
754none:
755 btrfs_free_path(path);
756 return -ENOENT;
757}
758
658static int should_defrag_range(struct inode *inode, u64 start, u64 len, 759static int should_defrag_range(struct inode *inode, u64 start, u64 len,
659 int thresh, u64 *last_len, u64 *skip, 760 int thresh, u64 *last_len, u64 *skip,
660 u64 *defrag_end) 761 u64 *defrag_end)
@@ -664,10 +765,6 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
664 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 765 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
665 int ret = 1; 766 int ret = 1;
666 767
667
668 if (thresh == 0)
669 thresh = 256 * 1024;
670
671 /* 768 /*
672 * make sure that once we start defragging an extent, we keep on 769
673 * defragging it 770 * defragging it
@@ -726,27 +823,176 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
726 return ret; 823 return ret;
727} 824}
728 825
729static int btrfs_defrag_file(struct file *file, 826/*
730 struct btrfs_ioctl_defrag_range_args *range) 827 * it doesn't do much good to defrag one or two pages
828 * at a time. This pulls in a nice chunk of pages
829 * to COW and defrag.
830 *
831 * It also makes sure the delalloc code has enough
832 * dirty data to avoid making new small extents as part
833 * of the defrag
834 *
835 * It's a good idea to start RA on this range
836 * before calling this.
837 */
838static int cluster_pages_for_defrag(struct inode *inode,
839 struct page **pages,
840 unsigned long start_index,
841 int num_pages)
731{ 842{
732 struct inode *inode = fdentry(file)->d_inode; 843 unsigned long file_end;
733 struct btrfs_root *root = BTRFS_I(inode)->root; 844 u64 isize = i_size_read(inode);
734 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 845 u64 page_start;
846 u64 page_end;
847 int ret;
848 int i;
849 int i_done;
735 struct btrfs_ordered_extent *ordered; 850 struct btrfs_ordered_extent *ordered;
736 struct page *page; 851 struct extent_state *cached_state = NULL;
852
853 if (isize == 0)
854 return 0;
855 file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
856
857 ret = btrfs_delalloc_reserve_space(inode,
858 num_pages << PAGE_CACHE_SHIFT);
859 if (ret)
860 return ret;
861again:
862 ret = 0;
863 i_done = 0;
864
865 /* step one, lock all the pages */
866 for (i = 0; i < num_pages; i++) {
867 struct page *page;
868 page = grab_cache_page(inode->i_mapping,
869 start_index + i);
870 if (!page)
871 break;
872
873 if (!PageUptodate(page)) {
874 btrfs_readpage(NULL, page);
875 lock_page(page);
876 if (!PageUptodate(page)) {
877 unlock_page(page);
878 page_cache_release(page);
879 ret = -EIO;
880 break;
881 }
882 }
883 isize = i_size_read(inode);
884 file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
885 if (!isize || page->index > file_end ||
886 page->mapping != inode->i_mapping) {
887 /* whoops, we blew past eof, skip this page */
888 unlock_page(page);
889 page_cache_release(page);
890 break;
891 }
892 pages[i] = page;
893 i_done++;
894 }
895 if (!i_done || ret)
896 goto out;
897
898 if (!(inode->i_sb->s_flags & MS_ACTIVE))
899 goto out;
900
901 /*
902 * so now we have a nice long stream of locked
903 * and up to date pages, let's wait on them
904 */
905 for (i = 0; i < i_done; i++)
906 wait_on_page_writeback(pages[i]);
907
908 page_start = page_offset(pages[0]);
909 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
910
911 lock_extent_bits(&BTRFS_I(inode)->io_tree,
912 page_start, page_end - 1, 0, &cached_state,
913 GFP_NOFS);
914 ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1);
915 if (ordered &&
916 ordered->file_offset + ordered->len > page_start &&
917 ordered->file_offset < page_end) {
918 btrfs_put_ordered_extent(ordered);
919 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
920 page_start, page_end - 1,
921 &cached_state, GFP_NOFS);
922 for (i = 0; i < i_done; i++) {
923 unlock_page(pages[i]);
924 page_cache_release(pages[i]);
925 }
926 btrfs_wait_ordered_range(inode, page_start,
927 page_end - page_start);
928 goto again;
929 }
930 if (ordered)
931 btrfs_put_ordered_extent(ordered);
932
933 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
934 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
935 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
936 GFP_NOFS);
937
938 if (i_done != num_pages) {
939 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
940 btrfs_delalloc_release_space(inode,
941 (num_pages - i_done) << PAGE_CACHE_SHIFT);
942 }
943
944
945 btrfs_set_extent_delalloc(inode, page_start, page_end - 1,
946 &cached_state);
947
948 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
949 page_start, page_end - 1, &cached_state,
950 GFP_NOFS);
951
952 for (i = 0; i < i_done; i++) {
953 clear_page_dirty_for_io(pages[i]);
954 ClearPageChecked(pages[i]);
955 set_page_extent_mapped(pages[i]);
956 set_page_dirty(pages[i]);
957 unlock_page(pages[i]);
958 page_cache_release(pages[i]);
959 }
960 return i_done;
961out:
962 for (i = 0; i < i_done; i++) {
963 unlock_page(pages[i]);
964 page_cache_release(pages[i]);
965 }
966 btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT);
967 return ret;
968
969}
970
971int btrfs_defrag_file(struct inode *inode, struct file *file,
972 struct btrfs_ioctl_defrag_range_args *range,
973 u64 newer_than, unsigned long max_to_defrag)
974{
975 struct btrfs_root *root = BTRFS_I(inode)->root;
737 struct btrfs_super_block *disk_super; 976 struct btrfs_super_block *disk_super;
977 struct file_ra_state *ra = NULL;
738 unsigned long last_index; 978 unsigned long last_index;
739 unsigned long ra_pages = root->fs_info->bdi.ra_pages;
740 unsigned long total_read = 0;
741 u64 features; 979 u64 features;
742 u64 page_start;
743 u64 page_end;
744 u64 last_len = 0; 980 u64 last_len = 0;
745 u64 skip = 0; 981 u64 skip = 0;
746 u64 defrag_end = 0; 982 u64 defrag_end = 0;
983 u64 newer_off = range->start;
984 int newer_left = 0;
747 unsigned long i; 985 unsigned long i;
748 int ret; 986 int ret;
987 int defrag_count = 0;
749 int compress_type = BTRFS_COMPRESS_ZLIB; 988 int compress_type = BTRFS_COMPRESS_ZLIB;
989 int extent_thresh = range->extent_thresh;
990 int newer_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
991 u64 new_align = ~((u64)128 * 1024 - 1);
992 struct page **pages = NULL;
993
994 if (extent_thresh == 0)
995 extent_thresh = 256 * 1024;
750 996
751 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) { 997 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
752 if (range->compress_type > BTRFS_COMPRESS_TYPES) 998 if (range->compress_type > BTRFS_COMPRESS_TYPES)
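The rewritten defrag path above works on clusters of pages instead of single pages, and aligns its restart offsets. With the common 4 KiB page size (PAGE_CACHE_SHIFT == 12, an assumption, not something this patch fixes) the constants work out as below; the offset 1000000 is just a made-up example:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long newer_cluster = (256 * 1024) >> 12;  /* 64 pages per defrag cluster */
            unsigned long long new_align = ~((unsigned long long)128 * 1024 - 1);
            unsigned long long newer_off = 1000000;                 /* hypothetical extent offset */

            /* restart at the previous 128 KiB boundary: 917504 = 7 * 128 KiB,
             * which is page index 224 at 4 KiB per page */
            printf("cluster=%llu pages, aligned start=%llu, page index=%llu\n",
                   newer_cluster, newer_off & new_align,
                   (newer_off & new_align) >> 12);
            return 0;
    }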
@@ -758,6 +1004,27 @@ static int btrfs_defrag_file(struct file *file,
758 if (inode->i_size == 0) 1004 if (inode->i_size == 0)
759 return 0; 1005 return 0;
760 1006
1007 /*
1008 * if we were not given a file, allocate a readahead
1009 * context
1010 */
1011 if (!file) {
1012 ra = kzalloc(sizeof(*ra), GFP_NOFS);
1013 if (!ra)
1014 return -ENOMEM;
1015 file_ra_state_init(ra, inode->i_mapping);
1016 } else {
1017 ra = &file->f_ra;
1018 }
1019
1020 pages = kmalloc(sizeof(struct page *) * newer_cluster,
1021 GFP_NOFS);
1022 if (!pages) {
1023 ret = -ENOMEM;
1024 goto out_ra;
1025 }
1026
1027 /* find the last page to defrag */
761 if (range->start + range->len > range->start) { 1028 if (range->start + range->len > range->start) {
762 last_index = min_t(u64, inode->i_size - 1, 1029 last_index = min_t(u64, inode->i_size - 1,
763 range->start + range->len - 1) >> PAGE_CACHE_SHIFT; 1030 range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
@@ -765,11 +1032,37 @@ static int btrfs_defrag_file(struct file *file,
765 last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; 1032 last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
766 } 1033 }
767 1034
768 i = range->start >> PAGE_CACHE_SHIFT; 1035 if (newer_than) {
769 while (i <= last_index) { 1036 ret = find_new_extents(root, inode, newer_than,
770 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, 1037 &newer_off, 64 * 1024);
1038 if (!ret) {
1039 range->start = newer_off;
1040 /*
1041 * we always align our defrag to help keep
1042 * the extents in the file evenly spaced
1043 */
1044 i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
1045 newer_left = newer_cluster;
1046 } else
1047 goto out_ra;
1048 } else {
1049 i = range->start >> PAGE_CACHE_SHIFT;
1050 }
1051 if (!max_to_defrag)
1052 max_to_defrag = last_index - 1;
1053
1054 while (i <= last_index && defrag_count < max_to_defrag) {
1055 /*
1056 * make sure we stop running if someone unmounts
1057 * the FS
1058 */
1059 if (!(inode->i_sb->s_flags & MS_ACTIVE))
1060 break;
1061
1062 if (!newer_than &&
1063 !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
771 PAGE_CACHE_SIZE, 1064 PAGE_CACHE_SIZE,
772 range->extent_thresh, 1065 extent_thresh,
773 &last_len, &skip, 1066 &last_len, &skip,
774 &defrag_end)) { 1067 &defrag_end)) {
775 unsigned long next; 1068 unsigned long next;
@@ -781,92 +1074,39 @@ static int btrfs_defrag_file(struct file *file,
781 i = max(i + 1, next); 1074 i = max(i + 1, next);
782 continue; 1075 continue;
783 } 1076 }
784
785 if (total_read % ra_pages == 0) {
786 btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
787 min(last_index, i + ra_pages - 1));
788 }
789 total_read++;
790 mutex_lock(&inode->i_mutex);
791 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) 1077 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
792 BTRFS_I(inode)->force_compress = compress_type; 1078 BTRFS_I(inode)->force_compress = compress_type;
793 1079
794 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 1080 btrfs_force_ra(inode->i_mapping, ra, file, i, newer_cluster);
795 if (ret)
796 goto err_unlock;
797again:
798 if (inode->i_size == 0 ||
799 i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) {
800 ret = 0;
801 goto err_reservations;
802 }
803 1081
804 page = grab_cache_page(inode->i_mapping, i); 1082 ret = cluster_pages_for_defrag(inode, pages, i, newer_cluster);
805 if (!page) { 1083 if (ret < 0)
806 ret = -ENOMEM; 1084 goto out_ra;
807 goto err_reservations;
808 }
809
810 if (!PageUptodate(page)) {
811 btrfs_readpage(NULL, page);
812 lock_page(page);
813 if (!PageUptodate(page)) {
814 unlock_page(page);
815 page_cache_release(page);
816 ret = -EIO;
817 goto err_reservations;
818 }
819 }
820
821 if (page->mapping != inode->i_mapping) {
822 unlock_page(page);
823 page_cache_release(page);
824 goto again;
825 }
826 1085
827 wait_on_page_writeback(page); 1086 defrag_count += ret;
1087 balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
1088 i += ret;
828 1089
829 if (PageDirty(page)) { 1090 if (newer_than) {
830 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 1091 if (newer_off == (u64)-1)
831 goto loop_unlock; 1092 break;
832 }
833
834 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
835 page_end = page_start + PAGE_CACHE_SIZE - 1;
836 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
837 1093
838 ordered = btrfs_lookup_ordered_extent(inode, page_start); 1094 newer_off = max(newer_off + 1,
839 if (ordered) { 1095 (u64)i << PAGE_CACHE_SHIFT);
840 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 1096
841 unlock_page(page); 1097 ret = find_new_extents(root, inode,
842 page_cache_release(page); 1098 newer_than, &newer_off,
843 btrfs_start_ordered_extent(inode, ordered, 1); 1099 64 * 1024);
844 btrfs_put_ordered_extent(ordered); 1100 if (!ret) {
845 goto again; 1101 range->start = newer_off;
1102 i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
1103 newer_left = newer_cluster;
1104 } else {
1105 break;
1106 }
1107 } else {
1108 i++;
846 } 1109 }
847 set_page_extent_mapped(page);
848
849 /*
850 * this makes sure page_mkwrite is called on the
851 * page if it is dirtied again later
852 */
853 clear_page_dirty_for_io(page);
854 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start,
855 page_end, EXTENT_DIRTY | EXTENT_DELALLOC |
856 EXTENT_DO_ACCOUNTING, GFP_NOFS);
857
858 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
859 ClearPageChecked(page);
860 set_page_dirty(page);
861 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
862
863loop_unlock:
864 unlock_page(page);
865 page_cache_release(page);
866 mutex_unlock(&inode->i_mutex);
867
868 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
869 i++;
870 } 1110 }
871 1111
872 if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) 1112 if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO))
@@ -898,12 +1138,14 @@ loop_unlock:
898 btrfs_set_super_incompat_flags(disk_super, features); 1138 btrfs_set_super_incompat_flags(disk_super, features);
899 } 1139 }
900 1140
901 return 0; 1141 if (!file)
1142 kfree(ra);
1143 return defrag_count;
902 1144
903err_reservations: 1145out_ra:
904 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 1146 if (!file)
905err_unlock: 1147 kfree(ra);
906 mutex_unlock(&inode->i_mutex); 1148 kfree(pages);
907 return ret; 1149 return ret;
908} 1150}
909 1151
@@ -1129,7 +1371,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
1129 int ret = 0; 1371 int ret = 0;
1130 u64 flags = 0; 1372 u64 flags = 0;
1131 1373
1132 if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) 1374 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
1133 return -EINVAL; 1375 return -EINVAL;
1134 1376
1135 down_read(&root->fs_info->subvol_sem); 1377 down_read(&root->fs_info->subvol_sem);
@@ -1156,7 +1398,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
1156 if (root->fs_info->sb->s_flags & MS_RDONLY) 1398 if (root->fs_info->sb->s_flags & MS_RDONLY)
1157 return -EROFS; 1399 return -EROFS;
1158 1400
1159 if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) 1401 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
1160 return -EINVAL; 1402 return -EINVAL;
1161 1403
1162 if (copy_from_user(&flags, arg, sizeof(flags))) 1404 if (copy_from_user(&flags, arg, sizeof(flags)))
@@ -1279,7 +1521,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
1279 int nritems; 1521 int nritems;
1280 int i; 1522 int i;
1281 int slot; 1523 int slot;
1282 int found = 0;
1283 int ret = 0; 1524 int ret = 0;
1284 1525
1285 leaf = path->nodes[0]; 1526 leaf = path->nodes[0];
@@ -1326,7 +1567,7 @@ static noinline int copy_to_sk(struct btrfs_root *root,
1326 item_off, item_len); 1567 item_off, item_len);
1327 *sk_offset += item_len; 1568 *sk_offset += item_len;
1328 } 1569 }
1329 found++; 1570 (*num_found)++;
1330 1571
1331 if (*num_found >= sk->nr_items) 1572 if (*num_found >= sk->nr_items)
1332 break; 1573 break;
@@ -1345,7 +1586,6 @@ advance_key:
1345 } else 1586 } else
1346 ret = 1; 1587 ret = 1;
1347overflow: 1588overflow:
1348 *num_found += found;
1349 return ret; 1589 return ret;
1350} 1590}
1351 1591
@@ -1402,7 +1642,7 @@ static noinline int search_ioctl(struct inode *inode,
1402 } 1642 }
1403 ret = copy_to_sk(root, path, &key, sk, args->buf, 1643 ret = copy_to_sk(root, path, &key, sk, args->buf,
1404 &sk_offset, &num_found); 1644 &sk_offset, &num_found);
1405 btrfs_release_path(root, path); 1645 btrfs_release_path(path);
1406 if (ret || num_found >= sk->nr_items) 1646 if (ret || num_found >= sk->nr_items)
1407 break; 1647 break;
1408 1648
@@ -1509,7 +1749,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
1509 if (key.offset == BTRFS_FIRST_FREE_OBJECTID) 1749 if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
1510 break; 1750 break;
1511 1751
1512 btrfs_release_path(root, path); 1752 btrfs_release_path(path);
1513 key.objectid = key.offset; 1753 key.objectid = key.offset;
1514 key.offset = (u64)-1; 1754 key.offset = (u64)-1;
1515 dirid = key.objectid; 1755 dirid = key.objectid;
@@ -1639,7 +1879,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1639 goto out_dput; 1879 goto out_dput;
1640 } 1880 }
1641 1881
1642 if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { 1882 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
1643 err = -EINVAL; 1883 err = -EINVAL;
1644 goto out_dput; 1884 goto out_dput;
1645 } 1885 }
@@ -1757,7 +1997,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
1757 /* the rest are all set to zero by kzalloc */ 1997 /* the rest are all set to zero by kzalloc */
1758 range->len = (u64)-1; 1998 range->len = (u64)-1;
1759 } 1999 }
1760 ret = btrfs_defrag_file(file, range); 2000 ret = btrfs_defrag_file(fdentry(file)->d_inode, file,
2001 range, 0, 0);
2002 if (ret > 0)
2003 ret = 0;
1761 kfree(range); 2004 kfree(range);
1762 break; 2005 break;
1763 default: 2006 default:
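With this change btrfs_defrag_file() takes the inode plus an optional struct file (for readahead state), a newer-than transid and a cap on pages, and returns the number of pages it defragged; the ioctl path above clamps a positive count back to 0 for userspace. The userspace entry point itself is unchanged. A hypothetical caller that defrags and compresses the first 16 MiB of a file might look like this (the <btrfs/ioctl.h> header path is an assumption from btrfs-progs; the range-args layout is the existing ABI that this series relocates out of fs/btrfs/ioctl.h):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <btrfs/ioctl.h>        /* assumed btrfs-progs header */

    int defrag_first_16m(const char *path)
    {
            struct btrfs_ioctl_defrag_range_args range;
            int fd, ret;

            fd = open(path, O_RDWR);
            if (fd < 0)
                    return -1;

            memset(&range, 0, sizeof(range));
            range.start = 0;
            range.len = 16 * 1024 * 1024;
            range.extent_thresh = 0;                 /* 0 = kernel default (256 KiB) */
            range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
            range.compress_type = 1;                 /* 1 = zlib */

            ret = ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
            close(fd);
            return ret;
    }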
@@ -1809,6 +2052,75 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
1809 return ret; 2052 return ret;
1810} 2053}
1811 2054
2055static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
2056{
2057 struct btrfs_ioctl_fs_info_args fi_args;
2058 struct btrfs_device *device;
2059 struct btrfs_device *next;
2060 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2061
2062 if (!capable(CAP_SYS_ADMIN))
2063 return -EPERM;
2064
2065 fi_args.num_devices = fs_devices->num_devices;
2066 fi_args.max_id = 0;
2067 memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid));
2068
2069 mutex_lock(&fs_devices->device_list_mutex);
2070 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
2071 if (device->devid > fi_args.max_id)
2072 fi_args.max_id = device->devid;
2073 }
2074 mutex_unlock(&fs_devices->device_list_mutex);
2075
2076 if (copy_to_user(arg, &fi_args, sizeof(fi_args)))
2077 return -EFAULT;
2078
2079 return 0;
2080}
2081
2082static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
2083{
2084 struct btrfs_ioctl_dev_info_args *di_args;
2085 struct btrfs_device *dev;
2086 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2087 int ret = 0;
2088 char *s_uuid = NULL;
2089 char empty_uuid[BTRFS_UUID_SIZE] = {0};
2090
2091 if (!capable(CAP_SYS_ADMIN))
2092 return -EPERM;
2093
2094 di_args = memdup_user(arg, sizeof(*di_args));
2095 if (IS_ERR(di_args))
2096 return PTR_ERR(di_args);
2097
2098 if (memcmp(empty_uuid, di_args->uuid, BTRFS_UUID_SIZE) != 0)
2099 s_uuid = di_args->uuid;
2100
2101 mutex_lock(&fs_devices->device_list_mutex);
2102 dev = btrfs_find_device(root, di_args->devid, s_uuid, NULL);
2103 mutex_unlock(&fs_devices->device_list_mutex);
2104
2105 if (!dev) {
2106 ret = -ENODEV;
2107 goto out;
2108 }
2109
2110 di_args->devid = dev->devid;
2111 di_args->bytes_used = dev->bytes_used;
2112 di_args->total_bytes = dev->total_bytes;
2113 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
2114 strncpy(di_args->path, dev->name, sizeof(di_args->path));
2115
2116out:
2117 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
2118 ret = -EFAULT;
2119
2120 kfree(di_args);
2121 return ret;
2122}
2123
1812static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, 2124static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1813 u64 off, u64 olen, u64 destoff) 2125 u64 off, u64 olen, u64 destoff)
1814{ 2126{
@@ -1925,7 +2237,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1925 } 2237 }
1926 2238
1927 /* clone data */ 2239 /* clone data */
1928 key.objectid = src->i_ino; 2240 key.objectid = btrfs_ino(src);
1929 key.type = BTRFS_EXTENT_DATA_KEY; 2241 key.type = BTRFS_EXTENT_DATA_KEY;
1930 key.offset = 0; 2242 key.offset = 0;
1931 2243
@@ -1952,7 +2264,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1952 2264
1953 btrfs_item_key_to_cpu(leaf, &key, slot); 2265 btrfs_item_key_to_cpu(leaf, &key, slot);
1954 if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY || 2266 if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
1955 key.objectid != src->i_ino) 2267 key.objectid != btrfs_ino(src))
1956 break; 2268 break;
1957 2269
1958 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) { 2270 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
@@ -1988,14 +2300,14 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1988 datal = btrfs_file_extent_ram_bytes(leaf, 2300 datal = btrfs_file_extent_ram_bytes(leaf,
1989 extent); 2301 extent);
1990 } 2302 }
1991 btrfs_release_path(root, path); 2303 btrfs_release_path(path);
1992 2304
1993 if (key.offset + datal <= off || 2305 if (key.offset + datal <= off ||
1994 key.offset >= off+len) 2306 key.offset >= off+len)
1995 goto next; 2307 goto next;
1996 2308
1997 memcpy(&new_key, &key, sizeof(new_key)); 2309 memcpy(&new_key, &key, sizeof(new_key));
1998 new_key.objectid = inode->i_ino; 2310 new_key.objectid = btrfs_ino(inode);
1999 if (off <= key.offset) 2311 if (off <= key.offset)
2000 new_key.offset = key.offset + destoff - off; 2312 new_key.offset = key.offset + destoff - off;
2001 else 2313 else
@@ -2049,7 +2361,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2049 ret = btrfs_inc_extent_ref(trans, root, 2361 ret = btrfs_inc_extent_ref(trans, root,
2050 disko, diskl, 0, 2362 disko, diskl, 0,
2051 root->root_key.objectid, 2363 root->root_key.objectid,
2052 inode->i_ino, 2364 btrfs_ino(inode),
2053 new_key.offset - datao); 2365 new_key.offset - datao);
2054 BUG_ON(ret); 2366 BUG_ON(ret);
2055 } 2367 }
@@ -2098,7 +2410,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2098 } 2410 }
2099 2411
2100 btrfs_mark_buffer_dirty(leaf); 2412 btrfs_mark_buffer_dirty(leaf);
2101 btrfs_release_path(root, path); 2413 btrfs_release_path(path);
2102 2414
2103 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 2415 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2104 2416
@@ -2119,12 +2431,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2119 btrfs_end_transaction(trans, root); 2431 btrfs_end_transaction(trans, root);
2120 } 2432 }
2121next: 2433next:
2122 btrfs_release_path(root, path); 2434 btrfs_release_path(path);
2123 key.offset++; 2435 key.offset++;
2124 } 2436 }
2125 ret = 0; 2437 ret = 0;
2126out: 2438out:
2127 btrfs_release_path(root, path); 2439 btrfs_release_path(path);
2128 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 2440 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
2129out_unlock: 2441out_unlock:
2130 mutex_unlock(&src->i_mutex); 2442 mutex_unlock(&src->i_mutex);
@@ -2471,6 +2783,58 @@ static noinline long btrfs_ioctl_wait_sync(struct file *file, void __user *argp)
2471 return btrfs_wait_for_commit(root, transid); 2783 return btrfs_wait_for_commit(root, transid);
2472} 2784}
2473 2785
2786static long btrfs_ioctl_scrub(struct btrfs_root *root, void __user *arg)
2787{
2788 int ret;
2789 struct btrfs_ioctl_scrub_args *sa;
2790
2791 if (!capable(CAP_SYS_ADMIN))
2792 return -EPERM;
2793
2794 sa = memdup_user(arg, sizeof(*sa));
2795 if (IS_ERR(sa))
2796 return PTR_ERR(sa);
2797
2798 ret = btrfs_scrub_dev(root, sa->devid, sa->start, sa->end,
2799 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY);
2800
2801 if (copy_to_user(arg, sa, sizeof(*sa)))
2802 ret = -EFAULT;
2803
2804 kfree(sa);
2805 return ret;
2806}
2807
2808static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
2809{
2810 if (!capable(CAP_SYS_ADMIN))
2811 return -EPERM;
2812
2813 return btrfs_scrub_cancel(root);
2814}
2815
2816static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
2817 void __user *arg)
2818{
2819 struct btrfs_ioctl_scrub_args *sa;
2820 int ret;
2821
2822 if (!capable(CAP_SYS_ADMIN))
2823 return -EPERM;
2824
2825 sa = memdup_user(arg, sizeof(*sa));
2826 if (IS_ERR(sa))
2827 return PTR_ERR(sa);
2828
2829 ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
2830
2831 if (copy_to_user(arg, sa, sizeof(*sa)))
2832 ret = -EFAULT;
2833
2834 kfree(sa);
2835 return ret;
2836}
2837
2474long btrfs_ioctl(struct file *file, unsigned int 2838long btrfs_ioctl(struct file *file, unsigned int
2475 cmd, unsigned long arg) 2839 cmd, unsigned long arg)
2476{ 2840{
@@ -2510,6 +2874,10 @@ long btrfs_ioctl(struct file *file, unsigned int
2510 return btrfs_ioctl_add_dev(root, argp); 2874 return btrfs_ioctl_add_dev(root, argp);
2511 case BTRFS_IOC_RM_DEV: 2875 case BTRFS_IOC_RM_DEV:
2512 return btrfs_ioctl_rm_dev(root, argp); 2876 return btrfs_ioctl_rm_dev(root, argp);
2877 case BTRFS_IOC_FS_INFO:
2878 return btrfs_ioctl_fs_info(root, argp);
2879 case BTRFS_IOC_DEV_INFO:
2880 return btrfs_ioctl_dev_info(root, argp);
2513 case BTRFS_IOC_BALANCE: 2881 case BTRFS_IOC_BALANCE:
2514 return btrfs_balance(root->fs_info->dev_root); 2882 return btrfs_balance(root->fs_info->dev_root);
2515 case BTRFS_IOC_CLONE: 2883 case BTRFS_IOC_CLONE:
@@ -2533,6 +2901,12 @@ long btrfs_ioctl(struct file *file, unsigned int
2533 return btrfs_ioctl_start_sync(file, argp); 2901 return btrfs_ioctl_start_sync(file, argp);
2534 case BTRFS_IOC_WAIT_SYNC: 2902 case BTRFS_IOC_WAIT_SYNC:
2535 return btrfs_ioctl_wait_sync(file, argp); 2903 return btrfs_ioctl_wait_sync(file, argp);
2904 case BTRFS_IOC_SCRUB:
2905 return btrfs_ioctl_scrub(root, argp);
2906 case BTRFS_IOC_SCRUB_CANCEL:
2907 return btrfs_ioctl_scrub_cancel(root, argp);
2908 case BTRFS_IOC_SCRUB_PROGRESS:
2909 return btrfs_ioctl_scrub_progress(root, argp);
2536 } 2910 }
2537 2911
2538 return -ENOTTY; 2912 return -ENOTTY;
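The new handlers above wire BTRFS_IOC_SCRUB, BTRFS_IOC_SCRUB_CANCEL and BTRFS_IOC_SCRUB_PROGRESS (numbers defined in the ioctl.h hunk below) to the scrub code. As a rough userspace sketch, scrubbing one device and reading back the counters could look like this; the <btrfs/ioctl.h> path is assumed from btrfs-progs and error handling is trimmed:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <btrfs/ioctl.h>        /* assumed btrfs-progs header */

    /* scrub all of device 'devid' on the filesystem mounted at 'mnt';
     * BTRFS_IOC_SCRUB blocks until the scrub finishes or is cancelled */
    int scrub_one_device(const char *mnt, unsigned long long devid)
    {
            struct btrfs_ioctl_scrub_args sa;
            int fd, ret;

            fd = open(mnt, O_RDONLY);
            if (fd < 0)
                    return -1;

            memset(&sa, 0, sizeof(sa));
            sa.devid = devid;
            sa.start = 0;
            sa.end = (__u64)-1;     /* whole device */
            sa.flags = 0;           /* or BTRFS_SCRUB_READONLY */

            ret = ioctl(fd, BTRFS_IOC_SCRUB, &sa);
            if (ret == 0)
                    printf("devid %llu: %llu csum errors, %llu corrected\n",
                           devid,
                           (unsigned long long)sa.progress.csum_errors,
                           (unsigned long long)sa.progress.corrected_errors);
            close(fd);
            return ret;
    }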
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 8fb382167b13..ad1ea789fcb4 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -32,6 +32,8 @@ struct btrfs_ioctl_vol_args {
32 32
33#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) 33#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
34#define BTRFS_SUBVOL_RDONLY (1ULL << 1) 34#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
35#define BTRFS_FSID_SIZE 16
36#define BTRFS_UUID_SIZE 16
35 37
36#define BTRFS_SUBVOL_NAME_MAX 4039 38#define BTRFS_SUBVOL_NAME_MAX 4039
37struct btrfs_ioctl_vol_args_v2 { 39struct btrfs_ioctl_vol_args_v2 {
@@ -42,6 +44,71 @@ struct btrfs_ioctl_vol_args_v2 {
42 char name[BTRFS_SUBVOL_NAME_MAX + 1]; 44 char name[BTRFS_SUBVOL_NAME_MAX + 1];
43}; 45};
44 46
47/*
48 * structure to report errors and progress to userspace, either as a
49 * result of a finished scrub, a canceled scrub or a progress inquiry
50 */
51struct btrfs_scrub_progress {
52 __u64 data_extents_scrubbed; /* # of data extents scrubbed */
53 __u64 tree_extents_scrubbed; /* # of tree extents scrubbed */
54 __u64 data_bytes_scrubbed; /* # of data bytes scrubbed */
55 __u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */
56 __u64 read_errors; /* # of read errors encountered (EIO) */
57 __u64 csum_errors; /* # of failed csum checks */
58 __u64 verify_errors; /* # of occurrences where the metadata
59 * of a tree block did not match the
60 * expected values, like generation or
61 * logical */
62 __u64 no_csum; /* # of 4k data blocks for which no csum
63 * is present, probably the result of
64 * data written with nodatasum */
65 __u64 csum_discards; /* # of csum for which no data was found
66 * in the extent tree. */
67 __u64 super_errors; /* # of bad super blocks encountered */
68 __u64 malloc_errors; /* # of internal kmalloc errors. These
69 * will likely cause an incomplete
70 * scrub */
71 __u64 uncorrectable_errors; /* # of errors where either no intact
72 * copy was found or the writeback
73 * failed */
74 __u64 corrected_errors; /* # of errors corrected */
75 __u64 last_physical; /* last physical address scrubbed. In
76 * case a scrub was aborted, this can
77 * be used to restart the scrub */
78 __u64 unverified_errors; /* # of occurrences where a read for a
79 * full (64k) bio failed, but the re-
80 * check succeeded for each 4k piece.
81 * Intermittent error. */
82};
83
84#define BTRFS_SCRUB_READONLY 1
85struct btrfs_ioctl_scrub_args {
86 __u64 devid; /* in */
87 __u64 start; /* in */
88 __u64 end; /* in */
89 __u64 flags; /* in */
90 struct btrfs_scrub_progress progress; /* out */
91 /* pad to 1k */
92 __u64 unused[(1024-32-sizeof(struct btrfs_scrub_progress))/8];
93};
94
95#define BTRFS_DEVICE_PATH_NAME_MAX 1024
96struct btrfs_ioctl_dev_info_args {
97 __u64 devid; /* in/out */
98 __u8 uuid[BTRFS_UUID_SIZE]; /* in/out */
99 __u64 bytes_used; /* out */
100 __u64 total_bytes; /* out */
101 __u64 unused[379]; /* pad to 4k */
102 __u8 path[BTRFS_DEVICE_PATH_NAME_MAX]; /* out */
103};
104
105struct btrfs_ioctl_fs_info_args {
106 __u64 max_id; /* out */
107 __u64 num_devices; /* out */
108 __u8 fsid[BTRFS_FSID_SIZE]; /* out */
109 __u64 reserved[124]; /* pad to 1k */
110};
111
45#define BTRFS_INO_LOOKUP_PATH_MAX 4080 112#define BTRFS_INO_LOOKUP_PATH_MAX 4080
46struct btrfs_ioctl_ino_lookup_args { 113struct btrfs_ioctl_ino_lookup_args {
47 __u64 treeid; 114 __u64 treeid;
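The padding in the three new structures above is easy to check by hand: btrfs_scrub_progress is 15 __u64 counters (120 bytes), the four leading __u64 fields of btrfs_ioctl_scrub_args add 32 bytes, so unused[] needs (1024 - 32 - 120) / 8 = 109 slots to reach 1 KiB; dev_info is 8 + 16 + 8 + 8 + 379 * 8 + 1024 = 4096 bytes; fs_info is 8 + 8 + 16 + 124 * 8 = 1024 bytes. A compile-time check of those sizes, as an illustration only (C11 _Static_assert, header path assumed from btrfs-progs):

    #include <btrfs/ioctl.h>        /* assumed btrfs-progs header */

    _Static_assert(sizeof(struct btrfs_scrub_progress) == 120,
                   "15 counters of 8 bytes each");
    _Static_assert(sizeof(struct btrfs_ioctl_scrub_args) == 1024,
                   "32 + 120 + 109 * 8 pads to 1 KiB");
    _Static_assert(sizeof(struct btrfs_ioctl_dev_info_args) == 4096,
                   "40 + 379 * 8 + 1024 pads to 4 KiB");
    _Static_assert(sizeof(struct btrfs_ioctl_fs_info_args) == 1024,
                   "32 + 124 * 8 pads to 1 KiB");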
@@ -114,37 +181,6 @@ struct btrfs_ioctl_clone_range_args {
114#define BTRFS_DEFRAG_RANGE_COMPRESS 1 181#define BTRFS_DEFRAG_RANGE_COMPRESS 1
115#define BTRFS_DEFRAG_RANGE_START_IO 2 182#define BTRFS_DEFRAG_RANGE_START_IO 2
116 183
117struct btrfs_ioctl_defrag_range_args {
118 /* start of the defrag operation */
119 __u64 start;
120
121 /* number of bytes to defrag, use (u64)-1 to say all */
122 __u64 len;
123
124 /*
125 * flags for the operation, which can include turning
126 * on compression for this one defrag
127 */
128 __u64 flags;
129
130 /*
131 * any extent bigger than this will be considered
132 * already defragged. Use 0 to take the kernel default
133 * Use 1 to say every single extent must be rewritten
134 */
135 __u32 extent_thresh;
136
137 /*
138 * which compression method to use if turning on compression
139 * for this defrag operation. If unspecified, zlib will
140 * be used
141 */
142 __u32 compress_type;
143
144 /* spare for later */
145 __u32 unused[4];
146};
147
148struct btrfs_ioctl_space_info { 184struct btrfs_ioctl_space_info {
149 __u64 flags; 185 __u64 flags;
150 __u64 total_bytes; 186 __u64 total_bytes;
@@ -203,4 +239,13 @@ struct btrfs_ioctl_space_args {
203 struct btrfs_ioctl_vol_args_v2) 239 struct btrfs_ioctl_vol_args_v2)
204#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64) 240#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
205#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64) 241#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
242#define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
243 struct btrfs_ioctl_scrub_args)
244#define BTRFS_IOC_SCRUB_CANCEL _IO(BTRFS_IOCTL_MAGIC, 28)
245#define BTRFS_IOC_SCRUB_PROGRESS _IOWR(BTRFS_IOCTL_MAGIC, 29, \
246 struct btrfs_ioctl_scrub_args)
247#define BTRFS_IOC_DEV_INFO _IOWR(BTRFS_IOCTL_MAGIC, 30, \
248 struct btrfs_ioctl_dev_info_args)
249#define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \
250 struct btrfs_ioctl_fs_info_args)
206#endif 251#endif
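BTRFS_IOC_FS_INFO reports the number of devices and the highest devid in use, and BTRFS_IOC_DEV_INFO is then queried per devid (the handler returns -ENODEV for ids that are not present), which is how a tool can enumerate the devices of a mounted filesystem. A rough sketch (header path assumed from btrfs-progs, error handling trimmed):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <btrfs/ioctl.h>        /* assumed btrfs-progs header */

    /* list the devices backing the btrfs filesystem mounted at 'mnt' */
    int list_devices(const char *mnt)
    {
            struct btrfs_ioctl_fs_info_args fi;
            struct btrfs_ioctl_dev_info_args di;
            unsigned long long devid;
            int fd;

            fd = open(mnt, O_RDONLY);
            if (fd < 0)
                    return -1;
            if (ioctl(fd, BTRFS_IOC_FS_INFO, &fi) < 0) {
                    close(fd);
                    return -1;
            }

            for (devid = 0; devid <= fi.max_id; devid++) {
                    memset(&di, 0, sizeof(di));
                    di.devid = devid;       /* an all-zero uuid means "match any uuid" */
                    if (ioctl(fd, BTRFS_IOC_DEV_INFO, &di) < 0) {
                            if (errno == ENODEV)
                                    continue;       /* devid not in use */
                            break;
                    }
                    printf("devid %llu: %s, %llu of %llu bytes used\n",
                           (unsigned long long)di.devid, (const char *)di.path,
                           (unsigned long long)di.bytes_used,
                           (unsigned long long)di.total_bytes);
            }
            close(fd);
            return 0;
    }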
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 6151f2ea38bb..66fa43dc3f0f 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -185,31 +185,6 @@ sleep:
185 return 0; 185 return 0;
186} 186}
187 187
188/*
189 * Very quick trylock, this does not spin or schedule. It returns
190 * 1 with the spinlock held if it was able to take the lock, or it
191 * returns zero if it was unable to take the lock.
192 *
193 * After this call, scheduling is not safe without first calling
194 * btrfs_set_lock_blocking()
195 */
196int btrfs_try_tree_lock(struct extent_buffer *eb)
197{
198 if (spin_trylock(&eb->lock)) {
199 if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
200 /*
201 * we've got the spinlock, but the real owner is
202 * blocking. Drop the spinlock and return failure
203 */
204 spin_unlock(&eb->lock);
205 return 0;
206 }
207 return 1;
208 }
209 /* someone else has the spinlock giveup */
210 return 0;
211}
212
213int btrfs_tree_unlock(struct extent_buffer *eb) 188int btrfs_tree_unlock(struct extent_buffer *eb)
214{ 189{
215 /* 190 /*
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 6c4ce457168c..5c33a560a2f1 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -21,8 +21,6 @@
21 21
22int btrfs_tree_lock(struct extent_buffer *eb); 22int btrfs_tree_lock(struct extent_buffer *eb);
23int btrfs_tree_unlock(struct extent_buffer *eb); 23int btrfs_tree_unlock(struct extent_buffer *eb);
24
25int btrfs_try_tree_lock(struct extent_buffer *eb);
26int btrfs_try_spin_lock(struct extent_buffer *eb); 24int btrfs_try_spin_lock(struct extent_buffer *eb);
27 25
28void btrfs_set_lock_blocking(struct extent_buffer *eb); 26void btrfs_set_lock_blocking(struct extent_buffer *eb);
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
index a97314cf6bd6..82d569cb6267 100644
--- a/fs/btrfs/ref-cache.c
+++ b/fs/btrfs/ref-cache.c
@@ -23,56 +23,6 @@
23#include "ref-cache.h" 23#include "ref-cache.h"
24#include "transaction.h" 24#include "transaction.h"
25 25
26/*
27 * leaf refs are used to cache the information about which extents
28 * a given leaf has references on. This allows us to process that leaf
29 * in btrfs_drop_snapshot without needing to read it back from disk.
30 */
31
32/*
33 * kmalloc a leaf reference struct and update the counters for the
34 * total ref cache size
35 */
36struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
37 int nr_extents)
38{
39 struct btrfs_leaf_ref *ref;
40 size_t size = btrfs_leaf_ref_size(nr_extents);
41
42 ref = kmalloc(size, GFP_NOFS);
43 if (ref) {
44 spin_lock(&root->fs_info->ref_cache_lock);
45 root->fs_info->total_ref_cache_size += size;
46 spin_unlock(&root->fs_info->ref_cache_lock);
47
48 memset(ref, 0, sizeof(*ref));
49 atomic_set(&ref->usage, 1);
50 INIT_LIST_HEAD(&ref->list);
51 }
52 return ref;
53}
54
55/*
56 * free a leaf reference struct and update the counters for the
57 * total ref cache size
58 */
59void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
60{
61 if (!ref)
62 return;
63 WARN_ON(atomic_read(&ref->usage) == 0);
64 if (atomic_dec_and_test(&ref->usage)) {
65 size_t size = btrfs_leaf_ref_size(ref->nritems);
66
67 BUG_ON(ref->in_tree);
68 kfree(ref);
69
70 spin_lock(&root->fs_info->ref_cache_lock);
71 root->fs_info->total_ref_cache_size -= size;
72 spin_unlock(&root->fs_info->ref_cache_lock);
73 }
74}
75
76static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, 26static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
77 struct rb_node *node) 27 struct rb_node *node)
78{ 28{
@@ -116,117 +66,3 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
116 } 66 }
117 return NULL; 67 return NULL;
118} 68}
119
120int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
121 int shared)
122{
123 struct btrfs_leaf_ref *ref = NULL;
124 struct btrfs_leaf_ref_tree *tree = root->ref_tree;
125
126 if (shared)
127 tree = &root->fs_info->shared_ref_tree;
128 if (!tree)
129 return 0;
130
131 spin_lock(&tree->lock);
132 while (!list_empty(&tree->list)) {
133 ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
134 BUG_ON(ref->tree != tree);
135 if (ref->root_gen > max_root_gen)
136 break;
137 if (!xchg(&ref->in_tree, 0)) {
138 cond_resched_lock(&tree->lock);
139 continue;
140 }
141
142 rb_erase(&ref->rb_node, &tree->root);
143 list_del_init(&ref->list);
144
145 spin_unlock(&tree->lock);
146 btrfs_free_leaf_ref(root, ref);
147 cond_resched();
148 spin_lock(&tree->lock);
149 }
150 spin_unlock(&tree->lock);
151 return 0;
152}
153
154/*
155 * find the leaf ref for a given extent. This returns the ref struct with
156 * a usage reference incremented
157 */
158struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
159 u64 bytenr)
160{
161 struct rb_node *rb;
162 struct btrfs_leaf_ref *ref = NULL;
163 struct btrfs_leaf_ref_tree *tree = root->ref_tree;
164again:
165 if (tree) {
166 spin_lock(&tree->lock);
167 rb = tree_search(&tree->root, bytenr);
168 if (rb)
169 ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
170 if (ref)
171 atomic_inc(&ref->usage);
172 spin_unlock(&tree->lock);
173 if (ref)
174 return ref;
175 }
176 if (tree != &root->fs_info->shared_ref_tree) {
177 tree = &root->fs_info->shared_ref_tree;
178 goto again;
179 }
180 return NULL;
181}
182
183/*
184 * add a fully filled in leaf ref struct
185 * remove all the refs older than a given root generation
186 */
187int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
188 int shared)
189{
190 int ret = 0;
191 struct rb_node *rb;
192 struct btrfs_leaf_ref_tree *tree = root->ref_tree;
193
194 if (shared)
195 tree = &root->fs_info->shared_ref_tree;
196
197 spin_lock(&tree->lock);
198 rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
199 if (rb) {
200 ret = -EEXIST;
201 } else {
202 atomic_inc(&ref->usage);
203 ref->tree = tree;
204 ref->in_tree = 1;
205 list_add_tail(&ref->list, &tree->list);
206 }
207 spin_unlock(&tree->lock);
208 return ret;
209}
210
211/*
212 * remove a single leaf ref from the tree. This drops the ref held by the tree
213 * only
214 */
215int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
216{
217 struct btrfs_leaf_ref_tree *tree;
218
219 if (!xchg(&ref->in_tree, 0))
220 return 0;
221
222 tree = ref->tree;
223 spin_lock(&tree->lock);
224
225 rb_erase(&ref->rb_node, &tree->root);
226 list_del_init(&ref->list);
227
228 spin_unlock(&tree->lock);
229
230 btrfs_free_leaf_ref(root, ref);
231 return 0;
232}
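The ref-cache code removed above followed a classic pattern: entries kept in a tree under a spinlock, an atomic usage count that starts at 1 on allocation, lookups taking an extra reference, and the final put freeing the entry and adjusting the accounting. A small standalone sketch of that get/put discipline (a pthread mutex stands in for the kernel spinlock and a plain counter for atomic_t; the names are illustrative, not btrfs APIs):

#include <pthread.h>
#include <stdlib.h>

struct ref_entry {
    unsigned long key;
    int usage;                  /* protected by cache_lock in this sketch */
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* allocate with an initial reference held by the caller */
static struct ref_entry *entry_alloc(unsigned long key)
{
    struct ref_entry *e = calloc(1, sizeof(*e));

    if (e) {
        e->key = key;
        e->usage = 1;
    }
    return e;
}

/* take an extra reference, e.g. when handing the entry out from a lookup */
static void entry_get(struct ref_entry *e)
{
    pthread_mutex_lock(&cache_lock);
    e->usage++;
    pthread_mutex_unlock(&cache_lock);
}

/* drop a reference; the final put frees the entry */
static void entry_put(struct ref_entry *e)
{
    int last;

    pthread_mutex_lock(&cache_lock);
    last = (--e->usage == 0);
    pthread_mutex_unlock(&cache_lock);
    if (last)
        free(e);
}

int main(void)
{
    struct ref_entry *e = entry_alloc(42);

    if (!e)
        return 1;
    entry_get(e);       /* a second user */
    entry_put(e);       /* second user done */
    entry_put(e);       /* initial reference dropped, entry freed */
    return 0;
}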
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h
index e2a55cb2072b..24f7001f6387 100644
--- a/fs/btrfs/ref-cache.h
+++ b/fs/btrfs/ref-cache.h
@@ -49,28 +49,4 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents)
49 return sizeof(struct btrfs_leaf_ref) + 49 return sizeof(struct btrfs_leaf_ref) +
50 sizeof(struct btrfs_extent_info) * nr_extents; 50 sizeof(struct btrfs_extent_info) * nr_extents;
51} 51}
52
53static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree)
54{
55 tree->root = RB_ROOT;
56 INIT_LIST_HEAD(&tree->list);
57 spin_lock_init(&tree->lock);
58}
59
60static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree)
61{
62 return RB_EMPTY_ROOT(&tree->root);
63}
64
65void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree);
66struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
67 int nr_extents);
68void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
69struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
70 u64 bytenr);
71int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
72 int shared);
73int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
74 int shared);
75int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
76#endif 52#endif
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index f340f7c99d09..ca38eca70af0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -30,6 +30,7 @@
30#include "btrfs_inode.h" 30#include "btrfs_inode.h"
31#include "async-thread.h" 31#include "async-thread.h"
32#include "free-space-cache.h" 32#include "free-space-cache.h"
33#include "inode-map.h"
33 34
34/* 35/*
35 * backref_node, mapping_node and tree_block start with this 36 * backref_node, mapping_node and tree_block start with this
@@ -507,6 +508,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans,
507 return 1; 508 return 1;
508} 509}
509 510
511
510static int should_ignore_root(struct btrfs_root *root) 512static int should_ignore_root(struct btrfs_root *root)
511{ 513{
512 struct btrfs_root *reloc_root; 514 struct btrfs_root *reloc_root;
@@ -529,7 +531,6 @@ static int should_ignore_root(struct btrfs_root *root)
529 */ 531 */
530 return 1; 532 return 1;
531} 533}
532
533/* 534/*
534 * find reloc tree by address of tree root 535 * find reloc tree by address of tree root
535 */ 536 */
@@ -961,7 +962,7 @@ again:
961 lower = upper; 962 lower = upper;
962 upper = NULL; 963 upper = NULL;
963 } 964 }
964 btrfs_release_path(root, path2); 965 btrfs_release_path(path2);
965next: 966next:
966 if (ptr < end) { 967 if (ptr < end) {
967 ptr += btrfs_extent_inline_ref_size(key.type); 968 ptr += btrfs_extent_inline_ref_size(key.type);
@@ -974,7 +975,7 @@ next:
974 if (ptr >= end) 975 if (ptr >= end)
975 path1->slots[0]++; 976 path1->slots[0]++;
976 } 977 }
977 btrfs_release_path(rc->extent_root, path1); 978 btrfs_release_path(path1);
978 979
979 cur->checked = 1; 980 cur->checked = 1;
980 WARN_ON(exist); 981 WARN_ON(exist);
@@ -1409,9 +1410,9 @@ again:
1409 prev = node; 1410 prev = node;
1410 entry = rb_entry(node, struct btrfs_inode, rb_node); 1411 entry = rb_entry(node, struct btrfs_inode, rb_node);
1411 1412
1412 if (objectid < entry->vfs_inode.i_ino) 1413 if (objectid < btrfs_ino(&entry->vfs_inode))
1413 node = node->rb_left; 1414 node = node->rb_left;
1414 else if (objectid > entry->vfs_inode.i_ino) 1415 else if (objectid > btrfs_ino(&entry->vfs_inode))
1415 node = node->rb_right; 1416 node = node->rb_right;
1416 else 1417 else
1417 break; 1418 break;
@@ -1419,7 +1420,7 @@ again:
1419 if (!node) { 1420 if (!node) {
1420 while (prev) { 1421 while (prev) {
1421 entry = rb_entry(prev, struct btrfs_inode, rb_node); 1422 entry = rb_entry(prev, struct btrfs_inode, rb_node);
1422 if (objectid <= entry->vfs_inode.i_ino) { 1423 if (objectid <= btrfs_ino(&entry->vfs_inode)) {
1423 node = prev; 1424 node = prev;
1424 break; 1425 break;
1425 } 1426 }
@@ -1434,7 +1435,7 @@ again:
1434 return inode; 1435 return inode;
1435 } 1436 }
1436 1437
1437 objectid = entry->vfs_inode.i_ino + 1; 1438 objectid = btrfs_ino(&entry->vfs_inode) + 1;
1438 if (cond_resched_lock(&root->inode_lock)) 1439 if (cond_resched_lock(&root->inode_lock))
1439 goto again; 1440 goto again;
1440 1441
@@ -1470,7 +1471,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1470 return -ENOMEM; 1471 return -ENOMEM;
1471 1472
1472 bytenr -= BTRFS_I(reloc_inode)->index_cnt; 1473 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1473 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, 1474 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
1474 bytenr, 0); 1475 bytenr, 0);
1475 if (ret < 0) 1476 if (ret < 0)
1476 goto out; 1477 goto out;
@@ -1558,11 +1559,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
1558 if (first) { 1559 if (first) {
1559 inode = find_next_inode(root, key.objectid); 1560 inode = find_next_inode(root, key.objectid);
1560 first = 0; 1561 first = 0;
1561 } else if (inode && inode->i_ino < key.objectid) { 1562 } else if (inode && btrfs_ino(inode) < key.objectid) {
1562 btrfs_add_delayed_iput(inode); 1563 btrfs_add_delayed_iput(inode);
1563 inode = find_next_inode(root, key.objectid); 1564 inode = find_next_inode(root, key.objectid);
1564 } 1565 }
1565 if (inode && inode->i_ino == key.objectid) { 1566 if (inode && btrfs_ino(inode) == key.objectid) {
1566 end = key.offset + 1567 end = key.offset +
1567 btrfs_file_extent_num_bytes(leaf, fi); 1568 btrfs_file_extent_num_bytes(leaf, fi);
1568 WARN_ON(!IS_ALIGNED(key.offset, 1569 WARN_ON(!IS_ALIGNED(key.offset,
@@ -1749,7 +1750,7 @@ again:
1749 1750
1750 btrfs_node_key_to_cpu(path->nodes[level], &key, 1751 btrfs_node_key_to_cpu(path->nodes[level], &key,
1751 path->slots[level]); 1752 path->slots[level]);
1752 btrfs_release_path(src, path); 1753 btrfs_release_path(path);
1753 1754
1754 path->lowest_level = level; 1755 path->lowest_level = level;
1755 ret = btrfs_search_slot(trans, src, &key, path, 0, 1); 1756 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
@@ -1893,6 +1894,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
1893 struct inode *inode = NULL; 1894 struct inode *inode = NULL;
1894 u64 objectid; 1895 u64 objectid;
1895 u64 start, end; 1896 u64 start, end;
1897 u64 ino;
1896 1898
1897 objectid = min_key->objectid; 1899 objectid = min_key->objectid;
1898 while (1) { 1900 while (1) {
@@ -1905,17 +1907,18 @@ static int invalidate_extent_cache(struct btrfs_root *root,
1905 inode = find_next_inode(root, objectid); 1907 inode = find_next_inode(root, objectid);
1906 if (!inode) 1908 if (!inode)
1907 break; 1909 break;
1910 ino = btrfs_ino(inode);
1908 1911
1909 if (inode->i_ino > max_key->objectid) { 1912 if (ino > max_key->objectid) {
1910 iput(inode); 1913 iput(inode);
1911 break; 1914 break;
1912 } 1915 }
1913 1916
1914 objectid = inode->i_ino + 1; 1917 objectid = ino + 1;
1915 if (!S_ISREG(inode->i_mode)) 1918 if (!S_ISREG(inode->i_mode))
1916 continue; 1919 continue;
1917 1920
1918 if (unlikely(min_key->objectid == inode->i_ino)) { 1921 if (unlikely(min_key->objectid == ino)) {
1919 if (min_key->type > BTRFS_EXTENT_DATA_KEY) 1922 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1920 continue; 1923 continue;
1921 if (min_key->type < BTRFS_EXTENT_DATA_KEY) 1924 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
@@ -1928,7 +1931,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
1928 start = 0; 1931 start = 0;
1929 } 1932 }
1930 1933
1931 if (unlikely(max_key->objectid == inode->i_ino)) { 1934 if (unlikely(max_key->objectid == ino)) {
1932 if (max_key->type < BTRFS_EXTENT_DATA_KEY) 1935 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1933 continue; 1936 continue;
1934 if (max_key->type > BTRFS_EXTENT_DATA_KEY) { 1937 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
@@ -2496,7 +2499,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2496 path->locks[upper->level] = 0; 2499 path->locks[upper->level] = 0;
2497 2500
2498 slot = path->slots[upper->level]; 2501 slot = path->slots[upper->level];
2499 btrfs_release_path(NULL, path); 2502 btrfs_release_path(path);
2500 } else { 2503 } else {
2501 ret = btrfs_bin_search(upper->eb, key, upper->level, 2504 ret = btrfs_bin_search(upper->eb, key, upper->level,
2502 &slot); 2505 &slot);
@@ -2737,7 +2740,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
2737 } else { 2740 } else {
2738 path->lowest_level = node->level; 2741 path->lowest_level = node->level;
2739 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2742 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2740 btrfs_release_path(root, path); 2743 btrfs_release_path(path);
2741 if (ret > 0) 2744 if (ret > 0)
2742 ret = 0; 2745 ret = 0;
2743 } 2746 }
@@ -2870,7 +2873,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
2870 struct extent_map *em; 2873 struct extent_map *em;
2871 int ret = 0; 2874 int ret = 0;
2872 2875
2873 em = alloc_extent_map(GFP_NOFS); 2876 em = alloc_extent_map();
2874 if (!em) 2877 if (!em)
2875 return -ENOMEM; 2878 return -ENOMEM;
2876 2879
@@ -3119,7 +3122,7 @@ static int add_tree_block(struct reloc_control *rc,
3119#endif 3122#endif
3120 } 3123 }
3121 3124
3122 btrfs_release_path(rc->extent_root, path); 3125 btrfs_release_path(path);
3123 3126
3124 BUG_ON(level == -1); 3127 BUG_ON(level == -1);
3125 3128
@@ -3220,7 +3223,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3220 key.offset = 0; 3223 key.offset = 0;
3221 3224
3222 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 3225 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
3223 if (!inode || IS_ERR(inode) || is_bad_inode(inode)) { 3226 if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) {
3224 if (inode && !IS_ERR(inode)) 3227 if (inode && !IS_ERR(inode))
3225 iput(inode); 3228 iput(inode);
3226 return -ENOENT; 3229 return -ENOENT;
@@ -3505,7 +3508,7 @@ int add_data_references(struct reloc_control *rc,
3505 } 3508 }
3506 path->slots[0]++; 3509 path->slots[0]++;
3507 } 3510 }
3508 btrfs_release_path(rc->extent_root, path); 3511 btrfs_release_path(path);
3509 if (err) 3512 if (err)
3510 free_block_list(blocks); 3513 free_block_list(blocks);
3511 return err; 3514 return err;
@@ -3568,7 +3571,7 @@ next:
3568 EXTENT_DIRTY); 3571 EXTENT_DIRTY);
3569 3572
3570 if (ret == 0 && start <= key.objectid) { 3573 if (ret == 0 && start <= key.objectid) {
3571 btrfs_release_path(rc->extent_root, path); 3574 btrfs_release_path(path);
3572 rc->search_start = end + 1; 3575 rc->search_start = end + 1;
3573 } else { 3576 } else {
3574 rc->search_start = key.objectid + key.offset; 3577 rc->search_start = key.objectid + key.offset;
@@ -3576,7 +3579,7 @@ next:
3576 return 0; 3579 return 0;
3577 } 3580 }
3578 } 3581 }
3579 btrfs_release_path(rc->extent_root, path); 3582 btrfs_release_path(path);
3580 return ret; 3583 return ret;
3581} 3584}
3582 3585
@@ -3713,7 +3716,7 @@ restart:
3713 flags = BTRFS_EXTENT_FLAG_DATA; 3716 flags = BTRFS_EXTENT_FLAG_DATA;
3714 3717
3715 if (path_change) { 3718 if (path_change) {
3716 btrfs_release_path(rc->extent_root, path); 3719 btrfs_release_path(path);
3717 3720
3718 path->search_commit_root = 1; 3721 path->search_commit_root = 1;
3719 path->skip_locking = 1; 3722 path->skip_locking = 1;
@@ -3736,7 +3739,7 @@ restart:
3736 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3739 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3737 ret = add_data_references(rc, &key, path, &blocks); 3740 ret = add_data_references(rc, &key, path, &blocks);
3738 } else { 3741 } else {
3739 btrfs_release_path(rc->extent_root, path); 3742 btrfs_release_path(path);
3740 ret = 0; 3743 ret = 0;
3741 } 3744 }
3742 if (ret < 0) { 3745 if (ret < 0) {
@@ -3799,7 +3802,7 @@ restart:
3799 } 3802 }
3800 } 3803 }
3801 3804
3802 btrfs_release_path(rc->extent_root, path); 3805 btrfs_release_path(path);
3803 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, 3806 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
3804 GFP_NOFS); 3807 GFP_NOFS);
3805 3808
@@ -3867,7 +3870,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3867 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 3870 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3868 BTRFS_INODE_PREALLOC); 3871 BTRFS_INODE_PREALLOC);
3869 btrfs_mark_buffer_dirty(leaf); 3872 btrfs_mark_buffer_dirty(leaf);
3870 btrfs_release_path(root, path); 3873 btrfs_release_path(path);
3871out: 3874out:
3872 btrfs_free_path(path); 3875 btrfs_free_path(path);
3873 return ret; 3876 return ret;
@@ -3897,7 +3900,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3897 if (IS_ERR(trans)) 3900 if (IS_ERR(trans))
3898 return ERR_CAST(trans); 3901 return ERR_CAST(trans);
3899 3902
3900 err = btrfs_find_free_objectid(trans, root, objectid, &objectid); 3903 err = btrfs_find_free_objectid(root, &objectid);
3901 if (err) 3904 if (err)
3902 goto out; 3905 goto out;
3903 3906
@@ -3935,7 +3938,7 @@ static struct reloc_control *alloc_reloc_control(void)
3935 INIT_LIST_HEAD(&rc->reloc_roots); 3938 INIT_LIST_HEAD(&rc->reloc_roots);
3936 backref_cache_init(&rc->backref_cache); 3939 backref_cache_init(&rc->backref_cache);
3937 mapping_tree_init(&rc->reloc_root_tree); 3940 mapping_tree_init(&rc->reloc_root_tree);
3938 extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS); 3941 extent_io_tree_init(&rc->processed_blocks, NULL);
3939 return rc; 3942 return rc;
3940} 3943}
3941 3944
@@ -4109,7 +4112,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4109 } 4112 }
4110 leaf = path->nodes[0]; 4113 leaf = path->nodes[0];
4111 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4114 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4112 btrfs_release_path(root->fs_info->tree_root, path); 4115 btrfs_release_path(path);
4113 4116
4114 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || 4117 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4115 key.type != BTRFS_ROOT_ITEM_KEY) 4118 key.type != BTRFS_ROOT_ITEM_KEY)
@@ -4141,7 +4144,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4141 4144
4142 key.offset--; 4145 key.offset--;
4143 } 4146 }
4144 btrfs_release_path(root->fs_info->tree_root, path); 4147 btrfs_release_path(path);
4145 4148
4146 if (list_empty(&reloc_roots)) 4149 if (list_empty(&reloc_roots))
4147 goto out; 4150 goto out;
@@ -4242,7 +4245,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4242 4245
4243 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; 4246 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
4244 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr, 4247 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
4245 disk_bytenr + len - 1, &list); 4248 disk_bytenr + len - 1, &list, 0);
4246 4249
4247 while (!list_empty(&list)) { 4250 while (!list_empty(&list)) {
4248 sums = list_entry(list.next, struct btrfs_ordered_sum, list); 4251 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
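Two mechanical changes recur throughout the relocation.c hunks above: btrfs_release_path() no longer takes a root argument, and direct reads of inode->i_ino are replaced with btrfs_ino(), which hands back the filesystem's own 64-bit object id for the inode instead of relying on the VFS inode number. A rough standalone illustration of why those two numbers are kept apart (a made-up sketch of the idea, not the actual btrfs helper):

#include <stdint.h>
#include <stdio.h>

/* illustrative only: the filesystem keys its items by a 64-bit object id
 * that need not match the (possibly narrower, possibly remapped) number
 * exposed to the VFS as i_ino */
struct fake_inode {
    unsigned long vfs_ino;      /* what i_ino would hold */
    uint64_t objectid;          /* what the filesystem keys items by */
};

static uint64_t fake_ino(const struct fake_inode *inode)
{
    /* prefer the filesystem's own id; fall back to the VFS number when
     * no on-disk id has been recorded for this inode */
    return inode->objectid ? inode->objectid : inode->vfs_ino;
}

int main(void)
{
    struct fake_inode in = { .vfs_ino = 257, .objectid = 1ULL << 40 };

    printf("items keyed by %llu, not %lu\n",
           (unsigned long long)fake_ino(&in), in.vfs_ino);
    return 0;
}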
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 6928bff62daa..ebe45443de06 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -22,53 +22,6 @@
22#include "print-tree.h" 22#include "print-tree.h"
23 23
24/* 24/*
25 * search forward for a root, starting with objectid 'search_start'
26 * if a root key is found, the objectid we find is filled into 'found_objectid'
27 * and 0 is returned. < 0 is returned on error, 1 if there is nothing
28 * left in the tree.
29 */
30int btrfs_search_root(struct btrfs_root *root, u64 search_start,
31 u64 *found_objectid)
32{
33 struct btrfs_path *path;
34 struct btrfs_key search_key;
35 int ret;
36
37 root = root->fs_info->tree_root;
38 search_key.objectid = search_start;
39 search_key.type = (u8)-1;
40 search_key.offset = (u64)-1;
41
42 path = btrfs_alloc_path();
43 BUG_ON(!path);
44again:
45 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
46 if (ret < 0)
47 goto out;
48 if (ret == 0) {
49 ret = 1;
50 goto out;
51 }
52 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
53 ret = btrfs_next_leaf(root, path);
54 if (ret)
55 goto out;
56 }
57 btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]);
58 if (search_key.type != BTRFS_ROOT_ITEM_KEY) {
59 search_key.offset++;
60 btrfs_release_path(root, path);
61 goto again;
62 }
63 ret = 0;
64 *found_objectid = search_key.objectid;
65
66out:
67 btrfs_free_path(path);
68 return ret;
69}
70
71/*
72 * lookup the root with the highest offset for a given objectid. The key we do 25 * lookup the root with the highest offset for a given objectid. The key we do
73 * find is copied into 'key'. If we find something return 0, otherwise 1, < 0 26 * find is copied into 'key'. If we find something return 0, otherwise 1, < 0
74 * on error. 27 * on error.
@@ -230,7 +183,7 @@ again:
230 183
231 memcpy(&found_key, &key, sizeof(key)); 184 memcpy(&found_key, &key, sizeof(key));
232 key.offset++; 185 key.offset++;
233 btrfs_release_path(root, path); 186 btrfs_release_path(path);
234 dead_root = 187 dead_root =
235 btrfs_read_fs_root_no_radix(root->fs_info->tree_root, 188 btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
236 &found_key); 189 &found_key);
@@ -292,7 +245,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
292 } 245 }
293 246
294 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 247 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
295 btrfs_release_path(tree_root, path); 248 btrfs_release_path(path);
296 249
297 if (key.objectid != BTRFS_ORPHAN_OBJECTID || 250 if (key.objectid != BTRFS_ORPHAN_OBJECTID ||
298 key.type != BTRFS_ORPHAN_ITEM_KEY) 251 key.type != BTRFS_ORPHAN_ITEM_KEY)
@@ -385,18 +338,22 @@ again:
385 *sequence = btrfs_root_ref_sequence(leaf, ref); 338 *sequence = btrfs_root_ref_sequence(leaf, ref);
386 339
387 ret = btrfs_del_item(trans, tree_root, path); 340 ret = btrfs_del_item(trans, tree_root, path);
388 BUG_ON(ret); 341 if (ret) {
342 err = ret;
343 goto out;
344 }
389 } else 345 } else
390 err = -ENOENT; 346 err = -ENOENT;
391 347
392 if (key.type == BTRFS_ROOT_BACKREF_KEY) { 348 if (key.type == BTRFS_ROOT_BACKREF_KEY) {
393 btrfs_release_path(tree_root, path); 349 btrfs_release_path(path);
394 key.objectid = ref_id; 350 key.objectid = ref_id;
395 key.type = BTRFS_ROOT_REF_KEY; 351 key.type = BTRFS_ROOT_REF_KEY;
396 key.offset = root_id; 352 key.offset = root_id;
397 goto again; 353 goto again;
398 } 354 }
399 355
356out:
400 btrfs_free_path(path); 357 btrfs_free_path(path);
401 return err; 358 return err;
402} 359}
@@ -463,7 +420,7 @@ again:
463 btrfs_mark_buffer_dirty(leaf); 420 btrfs_mark_buffer_dirty(leaf);
464 421
465 if (key.type == BTRFS_ROOT_BACKREF_KEY) { 422 if (key.type == BTRFS_ROOT_BACKREF_KEY) {
466 btrfs_release_path(tree_root, path); 423 btrfs_release_path(path);
467 key.objectid = ref_id; 424 key.objectid = ref_id;
468 key.type = BTRFS_ROOT_REF_KEY; 425 key.type = BTRFS_ROOT_REF_KEY;
469 key.offset = root_id; 426 key.offset = root_id;
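The root-tree.c hunk above also swaps a BUG_ON(ret) for ordinary error propagation: the return value is recorded in err and control jumps to a shared out: label so the path still gets freed. A minimal standalone sketch of that goto-cleanup idiom (the helpers are hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int do_step(int fail)
{
    return fail ? -EIO : 0;
}

static int do_work(int fail_second_step)
{
    char *buf;
    int err = 0;
    int ret;

    buf = malloc(64);
    if (!buf)
        return -ENOMEM;

    ret = do_step(0);
    if (ret) {
        err = ret;              /* record the error ... */
        goto out;               /* ... and fall through to the cleanup */
    }

    ret = do_step(fail_second_step);
    if (ret) {
        err = ret;
        goto out;
    }
out:
    free(buf);                  /* cleanup runs on every exit path */
    return err;
}

int main(void)
{
    printf("ok: %d, failing: %d\n", do_work(0), do_work(1));
    return 0;
}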
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
new file mode 100644
index 000000000000..6dfed0c27ac3
--- /dev/null
+++ b/fs/btrfs/scrub.c
@@ -0,0 +1,1369 @@
1/*
2 * Copyright (C) 2011 STRATO. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include <linux/pagemap.h>
21#include <linux/writeback.h>
22#include <linux/blkdev.h>
23#include <linux/rbtree.h>
24#include <linux/slab.h>
25#include <linux/workqueue.h>
26#include "ctree.h"
27#include "volumes.h"
28#include "disk-io.h"
29#include "ordered-data.h"
30
31/*
 32 * This is only the first step towards a full-featured scrub. It reads all
 33 * extents and super blocks and verifies the checksums. In case a bad checksum
34 * is found or the extent cannot be read, good data will be written back if
35 * any can be found.
36 *
37 * Future enhancements:
38 * - To enhance the performance, better read-ahead strategies for the
39 * extent-tree can be employed.
40 * - In case an unrepairable extent is encountered, track which files are
41 * affected and report them
42 * - In case of a read error on files with nodatasum, map the file and read
43 * the extent to trigger a writeback of the good copy
44 * - track and record media errors, throw out bad devices
45 * - add a mode to also read unallocated space
46 * - make the prefetch cancellable
47 */
48
49struct scrub_bio;
50struct scrub_page;
51struct scrub_dev;
52static void scrub_bio_end_io(struct bio *bio, int err);
53static void scrub_checksum(struct btrfs_work *work);
54static int scrub_checksum_data(struct scrub_dev *sdev,
55 struct scrub_page *spag, void *buffer);
56static int scrub_checksum_tree_block(struct scrub_dev *sdev,
57 struct scrub_page *spag, u64 logical,
58 void *buffer);
59static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
60static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
61static void scrub_fixup_end_io(struct bio *bio, int err);
62static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
63 struct page *page);
64static void scrub_fixup(struct scrub_bio *sbio, int ix);
65
66#define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */
67#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */
68
69struct scrub_page {
70 u64 flags; /* extent flags */
71 u64 generation;
72 u64 mirror_num;
73 int have_csum;
74 u8 csum[BTRFS_CSUM_SIZE];
75};
76
77struct scrub_bio {
78 int index;
79 struct scrub_dev *sdev;
80 struct bio *bio;
81 int err;
82 u64 logical;
83 u64 physical;
84 struct scrub_page spag[SCRUB_PAGES_PER_BIO];
85 u64 count;
86 int next_free;
87 struct btrfs_work work;
88};
89
90struct scrub_dev {
91 struct scrub_bio *bios[SCRUB_BIOS_PER_DEV];
92 struct btrfs_device *dev;
93 int first_free;
94 int curr;
95 atomic_t in_flight;
96 spinlock_t list_lock;
97 wait_queue_head_t list_wait;
98 u16 csum_size;
99 struct list_head csum_list;
100 atomic_t cancel_req;
101 int readonly;
102 /*
103 * statistics
104 */
105 struct btrfs_scrub_progress stat;
106 spinlock_t stat_lock;
107};
108
109static void scrub_free_csums(struct scrub_dev *sdev)
110{
111 while (!list_empty(&sdev->csum_list)) {
112 struct btrfs_ordered_sum *sum;
113 sum = list_first_entry(&sdev->csum_list,
114 struct btrfs_ordered_sum, list);
115 list_del(&sum->list);
116 kfree(sum);
117 }
118}
119
120static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
121{
122 int i;
123 int j;
124 struct page *last_page;
125
126 if (!sdev)
127 return;
128
129 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
130 struct scrub_bio *sbio = sdev->bios[i];
131 struct bio *bio;
132
133 if (!sbio)
134 break;
135
136 bio = sbio->bio;
137 if (bio) {
138 last_page = NULL;
139 for (j = 0; j < bio->bi_vcnt; ++j) {
140 if (bio->bi_io_vec[j].bv_page == last_page)
141 continue;
142 last_page = bio->bi_io_vec[j].bv_page;
143 __free_page(last_page);
144 }
145 bio_put(bio);
146 }
147 kfree(sbio);
148 }
149
150 scrub_free_csums(sdev);
151 kfree(sdev);
152}
153
154static noinline_for_stack
155struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
156{
157 struct scrub_dev *sdev;
158 int i;
159 int j;
160 int ret;
161 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
162
163 sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
164 if (!sdev)
165 goto nomem;
166 sdev->dev = dev;
167 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
168 struct bio *bio;
169 struct scrub_bio *sbio;
170
171 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
172 if (!sbio)
173 goto nomem;
174 sdev->bios[i] = sbio;
175
176 bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
177 if (!bio)
178 goto nomem;
179
180 sbio->index = i;
181 sbio->sdev = sdev;
182 sbio->bio = bio;
183 sbio->count = 0;
184 sbio->work.func = scrub_checksum;
185 bio->bi_private = sdev->bios[i];
186 bio->bi_end_io = scrub_bio_end_io;
187 bio->bi_sector = 0;
188 bio->bi_bdev = dev->bdev;
189 bio->bi_size = 0;
190
191 for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
192 struct page *page;
193 page = alloc_page(GFP_NOFS);
194 if (!page)
195 goto nomem;
196
197 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
198 if (!ret)
199 goto nomem;
200 }
201 WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);
202
203 if (i != SCRUB_BIOS_PER_DEV-1)
204 sdev->bios[i]->next_free = i + 1;
205 else
206 sdev->bios[i]->next_free = -1;
207 }
208 sdev->first_free = 0;
209 sdev->curr = -1;
210 atomic_set(&sdev->in_flight, 0);
211 atomic_set(&sdev->cancel_req, 0);
212 sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy);
213 INIT_LIST_HEAD(&sdev->csum_list);
214
215 spin_lock_init(&sdev->list_lock);
216 spin_lock_init(&sdev->stat_lock);
217 init_waitqueue_head(&sdev->list_wait);
218 return sdev;
219
220nomem:
221 scrub_free_dev(sdev);
222 return ERR_PTR(-ENOMEM);
223}
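scrub_setup_dev() above threads the SCRUB_BIOS_PER_DEV scrub_bio slots onto a free list by array index: each slot's next_free names the next free slot (or -1) and first_free points at the head. A small standalone sketch of that index-linked free list (single-threaded here; the scrub code does the same under list_lock):

#include <stdio.h>

#define NSLOTS 16

struct slot {
    int next_free;      /* index of the next free slot, or -1 */
    int in_use;
};

static struct slot slots[NSLOTS];
static int first_free;

static void slots_init(void)
{
    int i;

    for (i = 0; i < NSLOTS; i++) {
        slots[i].in_use = 0;
        slots[i].next_free = (i == NSLOTS - 1) ? -1 : i + 1;
    }
    first_free = 0;
}

/* pop a slot off the free list; returns its index, or -1 if none is free */
static int slot_get(void)
{
    int i = first_free;

    if (i != -1) {
        first_free = slots[i].next_free;
        slots[i].next_free = -1;
        slots[i].in_use = 1;
    }
    return i;
}

/* push a slot back onto the free list */
static void slot_put(int i)
{
    slots[i].in_use = 0;
    slots[i].next_free = first_free;
    first_free = i;
}

int main(void)
{
    int a, b;

    slots_init();
    a = slot_get();
    b = slot_get();
    printf("got slots %d and %d\n", a, b);
    slot_put(a);
    slot_put(b);
    return 0;
}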
224
225/*
226 * scrub_recheck_error gets called when either verification of the page
227 * failed or the bio failed to read, e.g. with EIO. In the latter case,
228 * recheck_error gets called for every page in the bio, even though only
229 * one may be bad
230 */
231static void scrub_recheck_error(struct scrub_bio *sbio, int ix)
232{
233 if (sbio->err) {
234 if (scrub_fixup_io(READ, sbio->sdev->dev->bdev,
235 (sbio->physical + ix * PAGE_SIZE) >> 9,
236 sbio->bio->bi_io_vec[ix].bv_page) == 0) {
237 if (scrub_fixup_check(sbio, ix) == 0)
238 return;
239 }
240 }
241
242 scrub_fixup(sbio, ix);
243}
244
245static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
246{
247 int ret = 1;
248 struct page *page;
249 void *buffer;
250 u64 flags = sbio->spag[ix].flags;
251
252 page = sbio->bio->bi_io_vec[ix].bv_page;
253 buffer = kmap_atomic(page, KM_USER0);
254 if (flags & BTRFS_EXTENT_FLAG_DATA) {
255 ret = scrub_checksum_data(sbio->sdev,
256 sbio->spag + ix, buffer);
257 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
258 ret = scrub_checksum_tree_block(sbio->sdev,
259 sbio->spag + ix,
260 sbio->logical + ix * PAGE_SIZE,
261 buffer);
262 } else {
263 WARN_ON(1);
264 }
265 kunmap_atomic(buffer, KM_USER0);
266
267 return ret;
268}
269
270static void scrub_fixup_end_io(struct bio *bio, int err)
271{
272 complete((struct completion *)bio->bi_private);
273}
274
275static void scrub_fixup(struct scrub_bio *sbio, int ix)
276{
277 struct scrub_dev *sdev = sbio->sdev;
278 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
279 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
280 struct btrfs_multi_bio *multi = NULL;
281 u64 logical = sbio->logical + ix * PAGE_SIZE;
282 u64 length;
283 int i;
284 int ret;
285 DECLARE_COMPLETION_ONSTACK(complete);
286
287 if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
288 (sbio->spag[ix].have_csum == 0)) {
289 /*
290 * nodatasum, don't try to fix anything
291 * FIXME: we can do better, open the inode and trigger a
292 * writeback
293 */
294 goto uncorrectable;
295 }
296
297 length = PAGE_SIZE;
298 ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
299 &multi, 0);
300 if (ret || !multi || length < PAGE_SIZE) {
301 printk(KERN_ERR
302 "scrub_fixup: btrfs_map_block failed us for %llu\n",
303 (unsigned long long)logical);
304 WARN_ON(1);
305 return;
306 }
307
308 if (multi->num_stripes == 1)
309 /* there aren't any replicas */
310 goto uncorrectable;
311
312 /*
313 * first find a good copy
314 */
315 for (i = 0; i < multi->num_stripes; ++i) {
316 if (i == sbio->spag[ix].mirror_num)
317 continue;
318
319 if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev,
320 multi->stripes[i].physical >> 9,
321 sbio->bio->bi_io_vec[ix].bv_page)) {
322 /* I/O-error, this is not a good copy */
323 continue;
324 }
325
326 if (scrub_fixup_check(sbio, ix) == 0)
327 break;
328 }
329 if (i == multi->num_stripes)
330 goto uncorrectable;
331
332 if (!sdev->readonly) {
333 /*
334 * bi_io_vec[ix].bv_page now contains good data, write it back
335 */
336 if (scrub_fixup_io(WRITE, sdev->dev->bdev,
337 (sbio->physical + ix * PAGE_SIZE) >> 9,
338 sbio->bio->bi_io_vec[ix].bv_page)) {
339 /* I/O-error, writeback failed, give up */
340 goto uncorrectable;
341 }
342 }
343
344 kfree(multi);
345 spin_lock(&sdev->stat_lock);
346 ++sdev->stat.corrected_errors;
347 spin_unlock(&sdev->stat_lock);
348
349 if (printk_ratelimit())
350 printk(KERN_ERR "btrfs: fixed up at %llu\n",
351 (unsigned long long)logical);
352 return;
353
354uncorrectable:
355 kfree(multi);
356 spin_lock(&sdev->stat_lock);
357 ++sdev->stat.uncorrectable_errors;
358 spin_unlock(&sdev->stat_lock);
359
360 if (printk_ratelimit())
361 printk(KERN_ERR "btrfs: unable to fixup at %llu\n",
362 (unsigned long long)logical);
363}
364
365static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
366 struct page *page)
367{
368 struct bio *bio = NULL;
369 int ret;
370 DECLARE_COMPLETION_ONSTACK(complete);
371
372 /* we are going to wait on this IO */
373 rw |= REQ_SYNC;
374
375 bio = bio_alloc(GFP_NOFS, 1);
376 bio->bi_bdev = bdev;
377 bio->bi_sector = sector;
378 bio_add_page(bio, page, PAGE_SIZE, 0);
379 bio->bi_end_io = scrub_fixup_end_io;
380 bio->bi_private = &complete;
381 submit_bio(rw, bio);
382
383 wait_for_completion(&complete);
384
385 ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
386 bio_put(bio);
387 return ret;
388}
389
390static void scrub_bio_end_io(struct bio *bio, int err)
391{
392 struct scrub_bio *sbio = bio->bi_private;
393 struct scrub_dev *sdev = sbio->sdev;
394 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
395
396 sbio->err = err;
397
398 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
399}
400
401static void scrub_checksum(struct btrfs_work *work)
402{
403 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
404 struct scrub_dev *sdev = sbio->sdev;
405 struct page *page;
406 void *buffer;
407 int i;
408 u64 flags;
409 u64 logical;
410 int ret;
411
412 if (sbio->err) {
413 for (i = 0; i < sbio->count; ++i)
414 scrub_recheck_error(sbio, i);
415
416 sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
417 sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
418 sbio->bio->bi_phys_segments = 0;
419 sbio->bio->bi_idx = 0;
420
421 for (i = 0; i < sbio->count; i++) {
422 struct bio_vec *bi;
423 bi = &sbio->bio->bi_io_vec[i];
424 bi->bv_offset = 0;
425 bi->bv_len = PAGE_SIZE;
426 }
427
428 spin_lock(&sdev->stat_lock);
429 ++sdev->stat.read_errors;
430 spin_unlock(&sdev->stat_lock);
431 goto out;
432 }
433 for (i = 0; i < sbio->count; ++i) {
434 page = sbio->bio->bi_io_vec[i].bv_page;
435 buffer = kmap_atomic(page, KM_USER0);
436 flags = sbio->spag[i].flags;
437 logical = sbio->logical + i * PAGE_SIZE;
438 ret = 0;
439 if (flags & BTRFS_EXTENT_FLAG_DATA) {
440 ret = scrub_checksum_data(sdev, sbio->spag + i, buffer);
441 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
442 ret = scrub_checksum_tree_block(sdev, sbio->spag + i,
443 logical, buffer);
444 } else if (flags & BTRFS_EXTENT_FLAG_SUPER) {
445 BUG_ON(i);
446 (void)scrub_checksum_super(sbio, buffer);
447 } else {
448 WARN_ON(1);
449 }
450 kunmap_atomic(buffer, KM_USER0);
451 if (ret)
452 scrub_recheck_error(sbio, i);
453 }
454
455out:
456 spin_lock(&sdev->list_lock);
457 sbio->next_free = sdev->first_free;
458 sdev->first_free = sbio->index;
459 spin_unlock(&sdev->list_lock);
460 atomic_dec(&sdev->in_flight);
461 wake_up(&sdev->list_wait);
462}
463
464static int scrub_checksum_data(struct scrub_dev *sdev,
465 struct scrub_page *spag, void *buffer)
466{
467 u8 csum[BTRFS_CSUM_SIZE];
468 u32 crc = ~(u32)0;
469 int fail = 0;
470 struct btrfs_root *root = sdev->dev->dev_root;
471
472 if (!spag->have_csum)
473 return 0;
474
475 crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE);
476 btrfs_csum_final(crc, csum);
477 if (memcmp(csum, spag->csum, sdev->csum_size))
478 fail = 1;
479
480 spin_lock(&sdev->stat_lock);
481 ++sdev->stat.data_extents_scrubbed;
482 sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
483 if (fail)
484 ++sdev->stat.csum_errors;
485 spin_unlock(&sdev->stat_lock);
486
487 return fail;
488}
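scrub_checksum_data() above seeds the checksum with ~(u32)0, accumulates it over the page with btrfs_csum_data(), finalizes it with btrfs_csum_final() and compares the result with the csum carried in the scrub_page. The same compute-then-compare shape in standalone C, with zlib's crc32() standing in for the btrfs checksum (illustration only; the on-disk checksum is not zlib crc32):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>                   /* crc32() used purely as a stand-in */

/* returns 1 on mismatch, 0 when the buffer matches the stored checksum */
static int check_page(const unsigned char *buf, size_t len,
                      const unsigned char *stored, size_t csum_size)
{
    uint32_t crc = (uint32_t)crc32(0L, Z_NULL, 0);      /* seed */
    unsigned char csum[4];

    crc = (uint32_t)crc32(crc, buf, len);               /* accumulate */
    memcpy(csum, &crc, sizeof(csum));                   /* "finalize" into bytes */
    return memcmp(csum, stored, csum_size) ? 1 : 0;
}

int main(void)
{
    unsigned char page[4096] = { 0 };
    unsigned char stored[4];
    uint32_t crc = (uint32_t)crc32(0L, Z_NULL, 0);

    crc = (uint32_t)crc32(crc, page, sizeof(page));
    memcpy(stored, &crc, sizeof(stored));

    printf("clean page mismatch: %d\n",
           check_page(page, sizeof(page), stored, sizeof(stored)));
    page[0] ^= 1;                                       /* flip one bit */
    printf("corrupted page mismatch: %d\n",
           check_page(page, sizeof(page), stored, sizeof(stored)));
    return 0;
}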
489
490static int scrub_checksum_tree_block(struct scrub_dev *sdev,
491 struct scrub_page *spag, u64 logical,
492 void *buffer)
493{
494 struct btrfs_header *h;
495 struct btrfs_root *root = sdev->dev->dev_root;
496 struct btrfs_fs_info *fs_info = root->fs_info;
497 u8 csum[BTRFS_CSUM_SIZE];
498 u32 crc = ~(u32)0;
499 int fail = 0;
500 int crc_fail = 0;
501
502 /*
503 * we don't use the getter functions here, as we
504 * a) don't have an extent buffer and
505 * b) the page is already kmapped
506 */
507 h = (struct btrfs_header *)buffer;
508
509 if (logical != le64_to_cpu(h->bytenr))
510 ++fail;
511
512 if (spag->generation != le64_to_cpu(h->generation))
513 ++fail;
514
515 if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
516 ++fail;
517
518 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
519 BTRFS_UUID_SIZE))
520 ++fail;
521
522 crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
523 PAGE_SIZE - BTRFS_CSUM_SIZE);
524 btrfs_csum_final(crc, csum);
525 if (memcmp(csum, h->csum, sdev->csum_size))
526 ++crc_fail;
527
528 spin_lock(&sdev->stat_lock);
529 ++sdev->stat.tree_extents_scrubbed;
530 sdev->stat.tree_bytes_scrubbed += PAGE_SIZE;
531 if (crc_fail)
532 ++sdev->stat.csum_errors;
533 if (fail)
534 ++sdev->stat.verify_errors;
535 spin_unlock(&sdev->stat_lock);
536
537 return fail || crc_fail;
538}
539
540static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
541{
542 struct btrfs_super_block *s;
543 u64 logical;
544 struct scrub_dev *sdev = sbio->sdev;
545 struct btrfs_root *root = sdev->dev->dev_root;
546 struct btrfs_fs_info *fs_info = root->fs_info;
547 u8 csum[BTRFS_CSUM_SIZE];
548 u32 crc = ~(u32)0;
549 int fail = 0;
550
551 s = (struct btrfs_super_block *)buffer;
552 logical = sbio->logical;
553
554 if (logical != le64_to_cpu(s->bytenr))
555 ++fail;
556
557 if (sbio->spag[0].generation != le64_to_cpu(s->generation))
558 ++fail;
559
560 if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
561 ++fail;
562
563 crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
564 PAGE_SIZE - BTRFS_CSUM_SIZE);
565 btrfs_csum_final(crc, csum);
566 if (memcmp(csum, s->csum, sbio->sdev->csum_size))
567 ++fail;
568
569 if (fail) {
570 /*
571 * if we find an error in a super block, we just report it.
572 * They will get written with the next transaction commit
573 * anyway
574 */
575 spin_lock(&sdev->stat_lock);
576 ++sdev->stat.super_errors;
577 spin_unlock(&sdev->stat_lock);
578 }
579
580 return fail;
581}
582
583static int scrub_submit(struct scrub_dev *sdev)
584{
585 struct scrub_bio *sbio;
586
587 if (sdev->curr == -1)
588 return 0;
589
590 sbio = sdev->bios[sdev->curr];
591
592 sbio->bio->bi_sector = sbio->physical >> 9;
593 sbio->bio->bi_size = sbio->count * PAGE_SIZE;
594 sbio->bio->bi_next = NULL;
595 sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
596 sbio->bio->bi_comp_cpu = -1;
597 sbio->bio->bi_bdev = sdev->dev->bdev;
598 sbio->err = 0;
599 sdev->curr = -1;
600 atomic_inc(&sdev->in_flight);
601
602 submit_bio(0, sbio->bio);
603
604 return 0;
605}
606
607static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
608 u64 physical, u64 flags, u64 gen, u64 mirror_num,
609 u8 *csum, int force)
610{
611 struct scrub_bio *sbio;
612
613again:
614 /*
615 * grab a fresh bio or wait for one to become available
616 */
617 while (sdev->curr == -1) {
618 spin_lock(&sdev->list_lock);
619 sdev->curr = sdev->first_free;
620 if (sdev->curr != -1) {
621 sdev->first_free = sdev->bios[sdev->curr]->next_free;
622 sdev->bios[sdev->curr]->next_free = -1;
623 sdev->bios[sdev->curr]->count = 0;
624 spin_unlock(&sdev->list_lock);
625 } else {
626 spin_unlock(&sdev->list_lock);
627 wait_event(sdev->list_wait, sdev->first_free != -1);
628 }
629 }
630 sbio = sdev->bios[sdev->curr];
631 if (sbio->count == 0) {
632 sbio->physical = physical;
633 sbio->logical = logical;
634 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
635 sbio->logical + sbio->count * PAGE_SIZE != logical) {
636 scrub_submit(sdev);
637 goto again;
638 }
639 sbio->spag[sbio->count].flags = flags;
640 sbio->spag[sbio->count].generation = gen;
641 sbio->spag[sbio->count].have_csum = 0;
642 sbio->spag[sbio->count].mirror_num = mirror_num;
643 if (csum) {
644 sbio->spag[sbio->count].have_csum = 1;
645 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
646 }
647 ++sbio->count;
648 if (sbio->count == SCRUB_PAGES_PER_BIO || force)
649 scrub_submit(sdev);
650
651 return 0;
652}
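scrub_page() above keeps adding pages to the current bio as long as both the physical and the logical address extend the run it started with, and flushes via scrub_submit() when the batch fills up or a discontiguity appears. That flush-on-gap-or-full batching, reduced to a standalone sketch (names invented for illustration):

#include <stdio.h>

#define BATCH_MAX 16
#define PAGE_SZ   4096ULL

static unsigned long long batch_start;
static int batch_count;

static void batch_flush(void)
{
    if (batch_count)
        printf("submit %d page(s) starting at %llu\n",
               batch_count, batch_start);
    batch_count = 0;
}

/* queue one page; flush first if it does not extend the current run */
static void batch_add(unsigned long long addr)
{
    if (batch_count &&
        batch_start + (unsigned long long)batch_count * PAGE_SZ != addr)
        batch_flush();
    if (batch_count == 0)
        batch_start = addr;
    batch_count++;
    if (batch_count == BATCH_MAX)
        batch_flush();
}

int main(void)
{
    unsigned long long a;

    for (a = 0; a < 20 * PAGE_SZ; a += PAGE_SZ)
        batch_add(a);           /* contiguous run, auto-flushed at 16 pages */
    batch_add(100 * PAGE_SZ);   /* the gap forces a flush of the leftovers */
    batch_flush();              /* drain whatever is still queued */
    return 0;
}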
653
654static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
655 u8 *csum)
656{
657 struct btrfs_ordered_sum *sum = NULL;
658 int ret = 0;
659 unsigned long i;
660 unsigned long num_sectors;
661 u32 sectorsize = sdev->dev->dev_root->sectorsize;
662
663 while (!list_empty(&sdev->csum_list)) {
664 sum = list_first_entry(&sdev->csum_list,
665 struct btrfs_ordered_sum, list);
666 if (sum->bytenr > logical)
667 return 0;
668 if (sum->bytenr + sum->len > logical)
669 break;
670
671 ++sdev->stat.csum_discards;
672 list_del(&sum->list);
673 kfree(sum);
674 sum = NULL;
675 }
676 if (!sum)
677 return 0;
678
679 num_sectors = sum->len / sectorsize;
680 for (i = 0; i < num_sectors; ++i) {
681 if (sum->sums[i].bytenr == logical) {
682 memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
683 ret = 1;
684 break;
685 }
686 }
687 if (ret && i == num_sectors - 1) {
688 list_del(&sum->list);
689 kfree(sum);
690 }
691 return ret;
692}
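scrub_find_csum() above consumes the pre-collected, sorted csum_list like a cursor: entries that end at or before the current logical address are discarded (counted as csum_discards), an entry starting beyond it means there is no checksum for this page, and otherwise the matching sector inside the covering btrfs_ordered_sum is copied out. The cursor-advance part of that, as a standalone sketch over a sorted array (illustrative structures, not the btrfs ones):

#include <stdio.h>

struct range {
    unsigned long long start;
    unsigned long long len;
};

/*
 * advance *idx past every range that ends at or before 'pos' and report
 * the index of the range covering 'pos', or -1 if none does
 */
static int covering_range(const struct range *r, int n, int *idx,
                          unsigned long long pos)
{
    while (*idx < n && r[*idx].start + r[*idx].len <= pos)
        (*idx)++;                       /* wholly behind the cursor: drop it */
    if (*idx == n || r[*idx].start > pos)
        return -1;                      /* nothing covers pos */
    return *idx;
}

int main(void)
{
    struct range sums[] = { { 0, 8192 }, { 16384, 4096 } };
    int idx = 0;

    printf("%d\n", covering_range(sums, 2, &idx, 4096));   /* 0 */
    printf("%d\n", covering_range(sums, 2, &idx, 12288));  /* -1: in the gap */
    printf("%d\n", covering_range(sums, 2, &idx, 16384));  /* 1 */
    return 0;
}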
693
694/* scrub_extent tries to collect up to 64 kB for each bio */
695static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
696 u64 physical, u64 flags, u64 gen, u64 mirror_num)
697{
698 int ret;
699 u8 csum[BTRFS_CSUM_SIZE];
700
701 while (len) {
702 u64 l = min_t(u64, len, PAGE_SIZE);
703 int have_csum = 0;
704
705 if (flags & BTRFS_EXTENT_FLAG_DATA) {
706 /* push csums to sbio */
707 have_csum = scrub_find_csum(sdev, logical, l, csum);
708 if (have_csum == 0)
709 ++sdev->stat.no_csum;
710 }
711 ret = scrub_page(sdev, logical, l, physical, flags, gen,
712 mirror_num, have_csum ? csum : NULL, 0);
713 if (ret)
714 return ret;
715 len -= l;
716 logical += l;
717 physical += l;
718 }
719 return 0;
720}
721
722static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
723 struct map_lookup *map, int num, u64 base, u64 length)
724{
725 struct btrfs_path *path;
726 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
727 struct btrfs_root *root = fs_info->extent_root;
728 struct btrfs_root *csum_root = fs_info->csum_root;
729 struct btrfs_extent_item *extent;
730 u64 flags;
731 int ret;
732 int slot;
733 int i;
734 u64 nstripes;
735 int start_stripe;
736 struct extent_buffer *l;
737 struct btrfs_key key;
738 u64 physical;
739 u64 logical;
740 u64 generation;
741 u64 mirror_num;
742
743 u64 increment = map->stripe_len;
744 u64 offset;
745
746 nstripes = length;
747 offset = 0;
748 do_div(nstripes, map->stripe_len);
749 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
750 offset = map->stripe_len * num;
751 increment = map->stripe_len * map->num_stripes;
752 mirror_num = 0;
753 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
754 int factor = map->num_stripes / map->sub_stripes;
755 offset = map->stripe_len * (num / map->sub_stripes);
756 increment = map->stripe_len * factor;
757 mirror_num = num % map->sub_stripes;
758 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
759 increment = map->stripe_len;
760 mirror_num = num % map->num_stripes;
761 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
762 increment = map->stripe_len;
763 mirror_num = num % map->num_stripes;
764 } else {
765 increment = map->stripe_len;
766 mirror_num = 0;
767 }
768
769 path = btrfs_alloc_path();
770 if (!path)
771 return -ENOMEM;
772
773 path->reada = 2;
774 path->search_commit_root = 1;
775 path->skip_locking = 1;
776
777 /*
778 * find all extents for each stripe and just read them to get
779 * them into the page cache
780 * FIXME: we can do better. build a more intelligent prefetching
781 */
782 logical = base + offset;
783 physical = map->stripes[num].physical;
784 ret = 0;
785 for (i = 0; i < nstripes; ++i) {
786 key.objectid = logical;
787 key.type = BTRFS_EXTENT_ITEM_KEY;
788 key.offset = (u64)0;
789
790 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
791 if (ret < 0)
792 goto out;
793
794 l = path->nodes[0];
795 slot = path->slots[0];
796 btrfs_item_key_to_cpu(l, &key, slot);
797 if (key.objectid != logical) {
798 ret = btrfs_previous_item(root, path, 0,
799 BTRFS_EXTENT_ITEM_KEY);
800 if (ret < 0)
801 goto out;
802 }
803
804 while (1) {
805 l = path->nodes[0];
806 slot = path->slots[0];
807 if (slot >= btrfs_header_nritems(l)) {
808 ret = btrfs_next_leaf(root, path);
809 if (ret == 0)
810 continue;
811 if (ret < 0)
812 goto out;
813
814 break;
815 }
816 btrfs_item_key_to_cpu(l, &key, slot);
817
818 if (key.objectid >= logical + map->stripe_len)
819 break;
820
821 path->slots[0]++;
822 }
823 btrfs_release_path(path);
824 logical += increment;
825 physical += map->stripe_len;
826 cond_resched();
827 }
828
829 /*
830 * collect all data csums for the stripe to avoid seeking during
831 * the scrub. This might currently (with crc32) end up being about 1 MB
832 */
833 start_stripe = 0;
834again:
835 logical = base + offset + start_stripe * increment;
836 for (i = start_stripe; i < nstripes; ++i) {
837 ret = btrfs_lookup_csums_range(csum_root, logical,
838 logical + map->stripe_len - 1,
839 &sdev->csum_list, 1);
840 if (ret)
841 goto out;
842
843 logical += increment;
844 cond_resched();
845 }
846 /*
847 * now find all extents for each stripe and scrub them
848 */
849 logical = base + offset + start_stripe * increment;
850 physical = map->stripes[num].physical + start_stripe * map->stripe_len;
851 ret = 0;
852 for (i = start_stripe; i < nstripes; ++i) {
853 /*
854 * canceled?
855 */
856 if (atomic_read(&fs_info->scrub_cancel_req) ||
857 atomic_read(&sdev->cancel_req)) {
858 ret = -ECANCELED;
859 goto out;
860 }
861 /*
862 * check to see if we have to pause
863 */
864 if (atomic_read(&fs_info->scrub_pause_req)) {
865 /* push queued extents */
866 scrub_submit(sdev);
867 wait_event(sdev->list_wait,
868 atomic_read(&sdev->in_flight) == 0);
869 atomic_inc(&fs_info->scrubs_paused);
870 wake_up(&fs_info->scrub_pause_wait);
871 mutex_lock(&fs_info->scrub_lock);
872 while (atomic_read(&fs_info->scrub_pause_req)) {
873 mutex_unlock(&fs_info->scrub_lock);
874 wait_event(fs_info->scrub_pause_wait,
875 atomic_read(&fs_info->scrub_pause_req) == 0);
876 mutex_lock(&fs_info->scrub_lock);
877 }
878 atomic_dec(&fs_info->scrubs_paused);
879 mutex_unlock(&fs_info->scrub_lock);
880 wake_up(&fs_info->scrub_pause_wait);
881 scrub_free_csums(sdev);
882 start_stripe = i;
883 goto again;
884 }
885
886 key.objectid = logical;
887 key.type = BTRFS_EXTENT_ITEM_KEY;
888 key.offset = (u64)0;
889
890 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
891 if (ret < 0)
892 goto out;
893
894 l = path->nodes[0];
895 slot = path->slots[0];
896 btrfs_item_key_to_cpu(l, &key, slot);
897 if (key.objectid != logical) {
898 ret = btrfs_previous_item(root, path, 0,
899 BTRFS_EXTENT_ITEM_KEY);
900 if (ret < 0)
901 goto out;
902 }
903
904 while (1) {
905 l = path->nodes[0];
906 slot = path->slots[0];
907 if (slot >= btrfs_header_nritems(l)) {
908 ret = btrfs_next_leaf(root, path);
909 if (ret == 0)
910 continue;
911 if (ret < 0)
912 goto out;
913
914 break;
915 }
916 btrfs_item_key_to_cpu(l, &key, slot);
917
918 if (key.objectid + key.offset <= logical)
919 goto next;
920
921 if (key.objectid >= logical + map->stripe_len)
922 break;
923
924 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
925 goto next;
926
927 extent = btrfs_item_ptr(l, slot,
928 struct btrfs_extent_item);
929 flags = btrfs_extent_flags(l, extent);
930 generation = btrfs_extent_generation(l, extent);
931
932 if (key.objectid < logical &&
933 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
934 printk(KERN_ERR
935 "btrfs scrub: tree block %llu spanning "
936 "stripes, ignored. logical=%llu\n",
937 (unsigned long long)key.objectid,
938 (unsigned long long)logical);
939 goto next;
940 }
941
942 /*
943 * trim extent to this stripe
944 */
945 if (key.objectid < logical) {
946 key.offset -= logical - key.objectid;
947 key.objectid = logical;
948 }
949 if (key.objectid + key.offset >
950 logical + map->stripe_len) {
951 key.offset = logical + map->stripe_len -
952 key.objectid;
953 }
954
955 ret = scrub_extent(sdev, key.objectid, key.offset,
956 key.objectid - logical + physical,
957 flags, generation, mirror_num);
958 if (ret)
959 goto out;
960
961next:
962 path->slots[0]++;
963 }
964 btrfs_release_path(path);
965 logical += increment;
966 physical += map->stripe_len;
967 spin_lock(&sdev->stat_lock);
968 sdev->stat.last_physical = physical;
969 spin_unlock(&sdev->stat_lock);
970 }
971 /* push queued extents */
972 scrub_submit(sdev);
973
974out:
975 btrfs_free_path(path);
976 return ret < 0 ? ret : 0;
977}
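The arithmetic at the top of scrub_stripe() above decides which logical addresses this device copy is responsible for: with RAID0, for example, device index num starts at stripe_len * num into the chunk and then advances by stripe_len * num_stripes. A standalone sketch of that walk (the concrete numbers are invented for illustration):

#include <stdio.h>

/* print the first few logical stripe starts scrubbed on device copy 'num'
 * of a RAID0-style chunk, mirroring the offset/increment math above */
static void raid0_stripes(unsigned long long base,
                          unsigned long long stripe_len,
                          int num_stripes, int num, int count)
{
    unsigned long long offset = stripe_len * num;
    unsigned long long increment = stripe_len * num_stripes;
    unsigned long long logical = base + offset;
    int i;

    for (i = 0; i < count; i++) {
        printf("stripe %d: logical %llu\n", i, logical);
        logical += increment;
    }
}

int main(void)
{
    /* a chunk at 1 MiB with 64 KiB stripes over 3 devices; we are device 1 */
    raid0_stripes(1048576ULL, 65536ULL, 3, 1, 4);
    return 0;
}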
978
979static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
980 u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length)
981{
982 struct btrfs_mapping_tree *map_tree =
983 &sdev->dev->dev_root->fs_info->mapping_tree;
984 struct map_lookup *map;
985 struct extent_map *em;
986 int i;
987 int ret = -EINVAL;
988
989 read_lock(&map_tree->map_tree.lock);
990 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
991 read_unlock(&map_tree->map_tree.lock);
992
993 if (!em)
994 return -EINVAL;
995
996 map = (struct map_lookup *)em->bdev;
997 if (em->start != chunk_offset)
998 goto out;
999
1000 if (em->len < length)
1001 goto out;
1002
1003 for (i = 0; i < map->num_stripes; ++i) {
1004 if (map->stripes[i].dev == sdev->dev) {
1005 ret = scrub_stripe(sdev, map, i, chunk_offset, length);
1006 if (ret)
1007 goto out;
1008 }
1009 }
1010out:
1011 free_extent_map(em);
1012
1013 return ret;
1014}
1015
1016static noinline_for_stack
1017int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1018{
1019 struct btrfs_dev_extent *dev_extent = NULL;
1020 struct btrfs_path *path;
1021 struct btrfs_root *root = sdev->dev->dev_root;
1022 struct btrfs_fs_info *fs_info = root->fs_info;
1023 u64 length;
1024 u64 chunk_tree;
1025 u64 chunk_objectid;
1026 u64 chunk_offset;
1027 int ret;
1028 int slot;
1029 struct extent_buffer *l;
1030 struct btrfs_key key;
1031 struct btrfs_key found_key;
1032 struct btrfs_block_group_cache *cache;
1033
1034 path = btrfs_alloc_path();
1035 if (!path)
1036 return -ENOMEM;
1037
1038 path->reada = 2;
1039 path->search_commit_root = 1;
1040 path->skip_locking = 1;
1041
1042 key.objectid = sdev->dev->devid;
1043 key.offset = 0ull;
1044 key.type = BTRFS_DEV_EXTENT_KEY;
1045
1046
1047 while (1) {
1048 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1049 if (ret < 0)
1050 goto out;
1051 ret = 0;
1052
1053 l = path->nodes[0];
1054 slot = path->slots[0];
1055
1056 btrfs_item_key_to_cpu(l, &found_key, slot);
1057
1058 if (found_key.objectid != sdev->dev->devid)
1059 break;
1060
1061 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
1062 break;
1063
1064 if (found_key.offset >= end)
1065 break;
1066
1067 if (found_key.offset < key.offset)
1068 break;
1069
1070 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1071 length = btrfs_dev_extent_length(l, dev_extent);
1072
1073 if (found_key.offset + length <= start) {
1074 key.offset = found_key.offset + length;
1075 btrfs_release_path(path);
1076 continue;
1077 }
1078
1079 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
1080 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
1081 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
1082
1083 /*
1084 * get a reference on the corresponding block group to prevent
1085 * the chunk from going away while we scrub it
1086 */
1087 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
1088 if (!cache) {
1089 ret = -ENOENT;
1090 goto out;
1091 }
1092 ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
1093 chunk_offset, length);
1094 btrfs_put_block_group(cache);
1095 if (ret)
1096 break;
1097
1098 key.offset = found_key.offset + length;
1099 btrfs_release_path(path);
1100 }
1101
1102out:
1103 btrfs_free_path(path);
1104 return ret;
1105}
1106
1107static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
1108{
1109 int i;
1110 u64 bytenr;
1111 u64 gen;
1112 int ret;
1113 struct btrfs_device *device = sdev->dev;
1114 struct btrfs_root *root = device->dev_root;
1115
1116 gen = root->fs_info->last_trans_committed;
1117
1118 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1119 bytenr = btrfs_sb_offset(i);
1120 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
1121 break;
1122
1123 ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr,
1124 BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
1125 if (ret)
1126 return ret;
1127 }
1128 wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
1129
1130 return 0;
1131}
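scrub_supers() above checks every superblock mirror whose offset, btrfs_sb_offset(i), still fits on the device. As a point of reference, the mirrors conventionally sit at 64 KiB, 64 MiB and 256 GiB; those values are assumed from the usual btrfs layout rather than taken from this patch. A throwaway sketch of the skip-mirrors-past-the-end loop:

#include <stdio.h>

#define SUPER_MIRROR_MAX 3
#define SUPER_INFO_SIZE  4096ULL

/* assumed conventional mirror offsets: 64 KiB, 64 MiB, 256 GiB */
static const unsigned long long sb_offset[SUPER_MIRROR_MAX] = {
    64ULL << 10, 64ULL << 20, 256ULL << 30,
};

int main(void)
{
    unsigned long long dev_bytes = 10ULL << 30;     /* a 10 GiB device */
    int i;

    for (i = 0; i < SUPER_MIRROR_MAX; i++) {
        if (sb_offset[i] + SUPER_INFO_SIZE >= dev_bytes)
            break;                  /* mirror falls past the end, stop */
        printf("would scrub super mirror %d at offset %llu\n",
               i, sb_offset[i]);
    }
    return 0;
}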
1132
1133/*
1134 * get a reference count on fs_info->scrub_workers. start worker if necessary
1135 */
1136static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
1137{
1138 struct btrfs_fs_info *fs_info = root->fs_info;
1139
1140 mutex_lock(&fs_info->scrub_lock);
1141 if (fs_info->scrub_workers_refcnt == 0)
1142 btrfs_start_workers(&fs_info->scrub_workers, 1);
1143 ++fs_info->scrub_workers_refcnt;
1144 mutex_unlock(&fs_info->scrub_lock);
1145
1146 return 0;
1147}
1148
1149static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
1150{
1151 struct btrfs_fs_info *fs_info = root->fs_info;
1152
1153 mutex_lock(&fs_info->scrub_lock);
1154 if (--fs_info->scrub_workers_refcnt == 0)
1155 btrfs_stop_workers(&fs_info->scrub_workers);
1156 WARN_ON(fs_info->scrub_workers_refcnt < 0);
1157 mutex_unlock(&fs_info->scrub_lock);
1158}
1159
1160
1161int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
1162 struct btrfs_scrub_progress *progress, int readonly)
1163{
1164 struct scrub_dev *sdev;
1165 struct btrfs_fs_info *fs_info = root->fs_info;
1166 int ret;
1167 struct btrfs_device *dev;
1168
1169 if (root->fs_info->closing)
1170 return -EINVAL;
1171
1172 /*
1173 * check some assumptions
1174 */
1175 if (root->sectorsize != PAGE_SIZE ||
1176 root->sectorsize != root->leafsize ||
1177 root->sectorsize != root->nodesize) {
1178 printk(KERN_ERR "btrfs_scrub: size assumptions fail\n");
1179 return -EINVAL;
1180 }
1181
1182 ret = scrub_workers_get(root);
1183 if (ret)
1184 return ret;
1185
1186 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1187 dev = btrfs_find_device(root, devid, NULL, NULL);
1188 if (!dev || dev->missing) {
1189 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1190 scrub_workers_put(root);
1191 return -ENODEV;
1192 }
1193 mutex_lock(&fs_info->scrub_lock);
1194
1195 if (!dev->in_fs_metadata) {
1196 mutex_unlock(&fs_info->scrub_lock);
1197 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1198 scrub_workers_put(root);
1199 return -ENODEV;
1200 }
1201
1202 if (dev->scrub_device) {
1203 mutex_unlock(&fs_info->scrub_lock);
1204 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1205 scrub_workers_put(root);
1206 return -EINPROGRESS;
1207 }
1208 sdev = scrub_setup_dev(dev);
1209 if (IS_ERR(sdev)) {
1210 mutex_unlock(&fs_info->scrub_lock);
1211 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1212 scrub_workers_put(root);
1213 return PTR_ERR(sdev);
1214 }
1215 sdev->readonly = readonly;
1216 dev->scrub_device = sdev;
1217
1218 atomic_inc(&fs_info->scrubs_running);
1219 mutex_unlock(&fs_info->scrub_lock);
1220 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1221
1222 down_read(&fs_info->scrub_super_lock);
1223 ret = scrub_supers(sdev);
1224 up_read(&fs_info->scrub_super_lock);
1225
1226 if (!ret)
1227 ret = scrub_enumerate_chunks(sdev, start, end);
1228
1229 wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
1230
1231 atomic_dec(&fs_info->scrubs_running);
1232 wake_up(&fs_info->scrub_pause_wait);
1233
1234 if (progress)
1235 memcpy(progress, &sdev->stat, sizeof(*progress));
1236
1237 mutex_lock(&fs_info->scrub_lock);
1238 dev->scrub_device = NULL;
1239 mutex_unlock(&fs_info->scrub_lock);
1240
1241 scrub_free_dev(sdev);
1242 scrub_workers_put(root);
1243
1244 return ret;
1245}
1246
1247int btrfs_scrub_pause(struct btrfs_root *root)
1248{
1249 struct btrfs_fs_info *fs_info = root->fs_info;
1250
1251 mutex_lock(&fs_info->scrub_lock);
1252 atomic_inc(&fs_info->scrub_pause_req);
1253 while (atomic_read(&fs_info->scrubs_paused) !=
1254 atomic_read(&fs_info->scrubs_running)) {
1255 mutex_unlock(&fs_info->scrub_lock);
1256 wait_event(fs_info->scrub_pause_wait,
1257 atomic_read(&fs_info->scrubs_paused) ==
1258 atomic_read(&fs_info->scrubs_running));
1259 mutex_lock(&fs_info->scrub_lock);
1260 }
1261 mutex_unlock(&fs_info->scrub_lock);
1262
1263 return 0;
1264}
1265
1266int btrfs_scrub_continue(struct btrfs_root *root)
1267{
1268 struct btrfs_fs_info *fs_info = root->fs_info;
1269
1270 atomic_dec(&fs_info->scrub_pause_req);
1271 wake_up(&fs_info->scrub_pause_wait);
1272 return 0;
1273}
1274
1275int btrfs_scrub_pause_super(struct btrfs_root *root)
1276{
1277 down_write(&root->fs_info->scrub_super_lock);
1278 return 0;
1279}
1280
1281int btrfs_scrub_continue_super(struct btrfs_root *root)
1282{
1283 up_write(&root->fs_info->scrub_super_lock);
1284 return 0;
1285}
1286
1287int btrfs_scrub_cancel(struct btrfs_root *root)
1288{
1289 struct btrfs_fs_info *fs_info = root->fs_info;
1290
1291 mutex_lock(&fs_info->scrub_lock);
1292 if (!atomic_read(&fs_info->scrubs_running)) {
1293 mutex_unlock(&fs_info->scrub_lock);
1294 return -ENOTCONN;
1295 }
1296
1297 atomic_inc(&fs_info->scrub_cancel_req);
1298 while (atomic_read(&fs_info->scrubs_running)) {
1299 mutex_unlock(&fs_info->scrub_lock);
1300 wait_event(fs_info->scrub_pause_wait,
1301 atomic_read(&fs_info->scrubs_running) == 0);
1302 mutex_lock(&fs_info->scrub_lock);
1303 }
1304 atomic_dec(&fs_info->scrub_cancel_req);
1305 mutex_unlock(&fs_info->scrub_lock);
1306
1307 return 0;
1308}
1309
1310int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
1311{
1312 struct btrfs_fs_info *fs_info = root->fs_info;
1313 struct scrub_dev *sdev;
1314
1315 mutex_lock(&fs_info->scrub_lock);
1316 sdev = dev->scrub_device;
1317 if (!sdev) {
1318 mutex_unlock(&fs_info->scrub_lock);
1319 return -ENOTCONN;
1320 }
1321 atomic_inc(&sdev->cancel_req);
1322 while (dev->scrub_device) {
1323 mutex_unlock(&fs_info->scrub_lock);
1324 wait_event(fs_info->scrub_pause_wait,
1325 dev->scrub_device == NULL);
1326 mutex_lock(&fs_info->scrub_lock);
1327 }
1328 mutex_unlock(&fs_info->scrub_lock);
1329
1330 return 0;
1331}
1332int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
1333{
1334 struct btrfs_fs_info *fs_info = root->fs_info;
1335 struct btrfs_device *dev;
1336 int ret;
1337
1338 /*
1339 * we have to hold the device_list_mutex here so the device
1340 * does not go away in cancel_dev. FIXME: find a better solution
1341 */
1342 mutex_lock(&fs_info->fs_devices->device_list_mutex);
1343 dev = btrfs_find_device(root, devid, NULL, NULL);
1344 if (!dev) {
1345 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1346 return -ENODEV;
1347 }
1348 ret = btrfs_scrub_cancel_dev(root, dev);
1349 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1350
1351 return ret;
1352}
1353
1354int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
1355 struct btrfs_scrub_progress *progress)
1356{
1357 struct btrfs_device *dev;
1358 struct scrub_dev *sdev = NULL;
1359
1360 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1361 dev = btrfs_find_device(root, devid, NULL, NULL);
1362 if (dev)
1363 sdev = dev->scrub_device;
1364 if (sdev)
1365 memcpy(progress, &sdev->stat, sizeof(*progress));
1366 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1367
1368 return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
1369}
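
The pause/cancel paths above (btrfs_scrub_pause, btrfs_scrub_continue, btrfs_scrub_cancel) all rely on the same handshake: bump an atomic request counter, then sleep on scrub_pause_wait until the running scrubs have caught up. The following is only a loose, self-contained sketch of that pattern; the demo_* names and the simplified locking are illustrative, not the btrfs symbols:

	#include <linux/atomic.h>
	#include <linux/wait.h>

	static atomic_t pause_req;	/* outstanding pause requests */
	static atomic_t nr_running;	/* scrubs currently in progress */
	static atomic_t nr_paused;	/* scrubs that acknowledged the pause */
	static DECLARE_WAIT_QUEUE_HEAD(pause_wait);

	/* requester: ask for a pause and wait until every scrub has parked */
	static void demo_pause(void)
	{
		atomic_inc(&pause_req);
		wait_event(pause_wait,
			   atomic_read(&nr_paused) == atomic_read(&nr_running));
	}

	/* worker: called at safe points inside the scrub loop */
	static void demo_checkpoint(void)
	{
		if (!atomic_read(&pause_req))
			return;
		atomic_inc(&nr_paused);
		wake_up(&pause_wait);
		wait_event(pause_wait, atomic_read(&pause_req) == 0);
		atomic_dec(&nr_paused);
	}

	/* requester: let the parked scrubs continue */
	static void demo_continue(void)
	{
		atomic_dec(&pause_req);
		wake_up(&pause_wait);
	}

The real code additionally drops and re-takes scrub_lock around the wait so that concurrent cancel requests can still make progress while a pause is pending.
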
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index be4ffa12f3ef..9b2e7e5bc3ef 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -41,6 +41,7 @@
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/cleancache.h> 42#include <linux/cleancache.h>
43#include "compat.h" 43#include "compat.h"
44#include "delayed-inode.h"
44#include "ctree.h" 45#include "ctree.h"
45#include "disk-io.h" 46#include "disk-io.h"
46#include "transaction.h" 47#include "transaction.h"
@@ -160,7 +161,7 @@ enum {
160 Opt_compress_type, Opt_compress_force, Opt_compress_force_type, 161 Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
161 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, 162 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
162 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, 163 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
163 Opt_enospc_debug, Opt_subvolrootid, Opt_err, 164 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err,
164}; 165};
165 166
166static match_table_t tokens = { 167static match_table_t tokens = {
@@ -191,6 +192,7 @@ static match_table_t tokens = {
191 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, 192 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
192 {Opt_enospc_debug, "enospc_debug"}, 193 {Opt_enospc_debug, "enospc_debug"},
193 {Opt_subvolrootid, "subvolrootid=%d"}, 194 {Opt_subvolrootid, "subvolrootid=%d"},
195 {Opt_defrag, "autodefrag"},
194 {Opt_err, NULL}, 196 {Opt_err, NULL},
195}; 197};
196 198
@@ -369,6 +371,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
369 case Opt_enospc_debug: 371 case Opt_enospc_debug:
370 btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); 372 btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
371 break; 373 break;
374 case Opt_defrag:
 375 case Opt_defrag: printk(KERN_INFO "btrfs: enabling auto defrag\n");
376 btrfs_set_opt(info->mount_opt, AUTO_DEFRAG);
377 break;
372 case Opt_err: 378 case Opt_err:
373 printk(KERN_INFO "btrfs: unrecognized mount option " 379 printk(KERN_INFO "btrfs: unrecognized mount option "
374 "'%s'\n", p); 380 "'%s'\n", p);
@@ -507,8 +513,10 @@ static struct dentry *get_default_root(struct super_block *sb,
507 */ 513 */
508 dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); 514 dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
509 di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); 515 di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
510 if (IS_ERR(di)) 516 if (IS_ERR(di)) {
517 btrfs_free_path(path);
511 return ERR_CAST(di); 518 return ERR_CAST(di);
519 }
512 if (!di) { 520 if (!di) {
513 /* 521 /*
514 * Ok the default dir item isn't there. This is weird since 522 * Ok the default dir item isn't there. This is weird since
@@ -741,7 +749,7 @@ static int btrfs_set_super(struct super_block *s, void *data)
741 * for multiple device setup. Make sure to keep it in sync. 749 * for multiple device setup. Make sure to keep it in sync.
742 */ 750 */
743static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, 751static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
744 const char *dev_name, void *data) 752 const char *device_name, void *data)
745{ 753{
746 struct block_device *bdev = NULL; 754 struct block_device *bdev = NULL;
747 struct super_block *s; 755 struct super_block *s;
@@ -764,7 +772,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
764 if (error) 772 if (error)
765 return ERR_PTR(error); 773 return ERR_PTR(error);
766 774
767 error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices); 775 error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
768 if (error) 776 if (error)
769 goto error_free_subvol_name; 777 goto error_free_subvol_name;
770 778
@@ -915,6 +923,32 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
915 return 0; 923 return 0;
916} 924}
917 925
 926/* Used to sort the devices by max_avail (descending sort) */
927static int btrfs_cmp_device_free_bytes(const void *dev_info1,
928 const void *dev_info2)
929{
930 if (((struct btrfs_device_info *)dev_info1)->max_avail >
931 ((struct btrfs_device_info *)dev_info2)->max_avail)
932 return -1;
933 else if (((struct btrfs_device_info *)dev_info1)->max_avail <
934 ((struct btrfs_device_info *)dev_info2)->max_avail)
935 return 1;
936 else
937 return 0;
938}
939
 940/*
 941 * sort the devices by max_avail, which holds the max free extent size of
 942 * each device (descending sort)
 943 */
944static inline void btrfs_descending_sort_devices(
945 struct btrfs_device_info *devices,
946 size_t nr_devices)
947{
948 sort(devices, nr_devices, sizeof(struct btrfs_device_info),
949 btrfs_cmp_device_free_bytes, NULL);
950}
951
918/* 952/*
919 * The helper to calc the free space on the devices that can be used to store 953 * The helper to calc the free space on the devices that can be used to store
920 * file data. 954 * file data.
@@ -1208,10 +1242,14 @@ static int __init init_btrfs_fs(void)
1208 if (err) 1242 if (err)
1209 goto free_extent_io; 1243 goto free_extent_io;
1210 1244
1211 err = btrfs_interface_init(); 1245 err = btrfs_delayed_inode_init();
1212 if (err) 1246 if (err)
1213 goto free_extent_map; 1247 goto free_extent_map;
1214 1248
1249 err = btrfs_interface_init();
1250 if (err)
1251 goto free_delayed_inode;
1252
1215 err = register_filesystem(&btrfs_fs_type); 1253 err = register_filesystem(&btrfs_fs_type);
1216 if (err) 1254 if (err)
1217 goto unregister_ioctl; 1255 goto unregister_ioctl;
@@ -1221,6 +1259,8 @@ static int __init init_btrfs_fs(void)
1221 1259
1222unregister_ioctl: 1260unregister_ioctl:
1223 btrfs_interface_exit(); 1261 btrfs_interface_exit();
1262free_delayed_inode:
1263 btrfs_delayed_inode_exit();
1224free_extent_map: 1264free_extent_map:
1225 extent_map_exit(); 1265 extent_map_exit();
1226free_extent_io: 1266free_extent_io:
@@ -1237,6 +1277,7 @@ free_sysfs:
1237static void __exit exit_btrfs_fs(void) 1277static void __exit exit_btrfs_fs(void)
1238{ 1278{
1239 btrfs_destroy_cachep(); 1279 btrfs_destroy_cachep();
1280 btrfs_delayed_inode_exit();
1240 extent_map_exit(); 1281 extent_map_exit();
1241 extent_io_exit(); 1282 extent_io_exit();
1242 btrfs_interface_exit(); 1283 btrfs_interface_exit();
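
The new btrfs_cmp_device_free_bytes comparator follows the usual lib/sort contract: negative when the first argument should come first, so returning -1 for the larger max_avail produces a descending order. Below is a minimal sketch of the same idea on a plain array of u64 values; demo_cmp_desc and demo_sort_desc are invented names, not btrfs helpers:

	#include <linux/sort.h>
	#include <linux/types.h>

	/* larger values sort first, i.e. descending order */
	static int demo_cmp_desc(const void *a, const void *b)
	{
		u64 x = *(const u64 *)a;
		u64 y = *(const u64 *)b;

		if (x > y)
			return -1;
		if (x < y)
			return 1;
		return 0;
	}

	static void demo_sort_desc(u64 *vals, size_t n)
	{
		/* NULL swap function falls back to the generic byte swap */
		sort(vals, n, sizeof(*vals), demo_cmp_desc, NULL);
	}

Comparing the fields explicitly, rather than returning a subtraction, avoids the truncation a naive "return b - a" would risk with 64-bit sizes.
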
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 4ce16ef702a3..c3c223ae6691 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -174,86 +174,9 @@ static const struct sysfs_ops btrfs_root_attr_ops = {
174 .store = btrfs_root_attr_store, 174 .store = btrfs_root_attr_store,
175}; 175};
176 176
177static struct kobj_type btrfs_root_ktype = {
178 .default_attrs = btrfs_root_attrs,
179 .sysfs_ops = &btrfs_root_attr_ops,
180 .release = btrfs_root_release,
181};
182
183static struct kobj_type btrfs_super_ktype = {
184 .default_attrs = btrfs_super_attrs,
185 .sysfs_ops = &btrfs_super_attr_ops,
186 .release = btrfs_super_release,
187};
188
189/* /sys/fs/btrfs/ entry */ 177/* /sys/fs/btrfs/ entry */
190static struct kset *btrfs_kset; 178static struct kset *btrfs_kset;
191 179
192int btrfs_sysfs_add_super(struct btrfs_fs_info *fs)
193{
194 int error;
195 char *name;
196 char c;
197 int len = strlen(fs->sb->s_id) + 1;
198 int i;
199
200 name = kmalloc(len, GFP_NOFS);
201 if (!name) {
202 error = -ENOMEM;
203 goto fail;
204 }
205
206 for (i = 0; i < len; i++) {
207 c = fs->sb->s_id[i];
208 if (c == '/' || c == '\\')
209 c = '!';
210 name[i] = c;
211 }
212 name[len] = '\0';
213
214 fs->super_kobj.kset = btrfs_kset;
215 error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype,
216 NULL, "%s", name);
217 kfree(name);
218 if (error)
219 goto fail;
220
221 return 0;
222
223fail:
224 printk(KERN_ERR "btrfs: sysfs creation for super failed\n");
225 return error;
226}
227
228int btrfs_sysfs_add_root(struct btrfs_root *root)
229{
230 int error;
231
232 error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype,
233 &root->fs_info->super_kobj,
234 "%s", root->name);
235 if (error)
236 goto fail;
237
238 return 0;
239
240fail:
241 printk(KERN_ERR "btrfs: sysfs creation for root failed\n");
242 return error;
243}
244
245void btrfs_sysfs_del_root(struct btrfs_root *root)
246{
247 kobject_put(&root->root_kobj);
248 wait_for_completion(&root->kobj_unregister);
249}
250
251void btrfs_sysfs_del_super(struct btrfs_fs_info *fs)
252{
253 kobject_put(&fs->super_kobj);
254 wait_for_completion(&fs->kobj_unregister);
255}
256
257int btrfs_init_sysfs(void) 180int btrfs_init_sysfs(void)
258{ 181{
259 btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj); 182 btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c571734d5e5a..dc80f7156923 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -27,6 +27,7 @@
27#include "transaction.h" 27#include "transaction.h"
28#include "locking.h" 28#include "locking.h"
29#include "tree-log.h" 29#include "tree-log.h"
30#include "inode-map.h"
30 31
31#define BTRFS_ROOT_TRANS_TAG 0 32#define BTRFS_ROOT_TRANS_TAG 0
32 33
@@ -80,8 +81,7 @@ static noinline int join_transaction(struct btrfs_root *root)
80 INIT_LIST_HEAD(&cur_trans->pending_snapshots); 81 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
81 list_add_tail(&cur_trans->list, &root->fs_info->trans_list); 82 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
82 extent_io_tree_init(&cur_trans->dirty_pages, 83 extent_io_tree_init(&cur_trans->dirty_pages,
83 root->fs_info->btree_inode->i_mapping, 84 root->fs_info->btree_inode->i_mapping);
84 GFP_NOFS);
85 spin_lock(&root->fs_info->new_trans_lock); 85 spin_lock(&root->fs_info->new_trans_lock);
86 root->fs_info->running_transaction = cur_trans; 86 root->fs_info->running_transaction = cur_trans;
87 spin_unlock(&root->fs_info->new_trans_lock); 87 spin_unlock(&root->fs_info->new_trans_lock);
@@ -347,49 +347,6 @@ out_unlock:
347 return ret; 347 return ret;
348} 348}
349 349
350#if 0
351/*
352 * rate limit against the drop_snapshot code. This helps to slow down new
353 * operations if the drop_snapshot code isn't able to keep up.
354 */
355static void throttle_on_drops(struct btrfs_root *root)
356{
357 struct btrfs_fs_info *info = root->fs_info;
358 int harder_count = 0;
359
360harder:
361 if (atomic_read(&info->throttles)) {
362 DEFINE_WAIT(wait);
363 int thr;
364 thr = atomic_read(&info->throttle_gen);
365
366 do {
367 prepare_to_wait(&info->transaction_throttle,
368 &wait, TASK_UNINTERRUPTIBLE);
369 if (!atomic_read(&info->throttles)) {
370 finish_wait(&info->transaction_throttle, &wait);
371 break;
372 }
373 schedule();
374 finish_wait(&info->transaction_throttle, &wait);
375 } while (thr == atomic_read(&info->throttle_gen));
376 harder_count++;
377
378 if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
379 harder_count < 2)
380 goto harder;
381
382 if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
383 harder_count < 10)
384 goto harder;
385
386 if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
387 harder_count < 20)
388 goto harder;
389 }
390}
391#endif
392
393void btrfs_throttle(struct btrfs_root *root) 350void btrfs_throttle(struct btrfs_root *root)
394{ 351{
395 mutex_lock(&root->fs_info->trans_mutex); 352 mutex_lock(&root->fs_info->trans_mutex);
@@ -487,19 +444,40 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
487int btrfs_end_transaction(struct btrfs_trans_handle *trans, 444int btrfs_end_transaction(struct btrfs_trans_handle *trans,
488 struct btrfs_root *root) 445 struct btrfs_root *root)
489{ 446{
490 return __btrfs_end_transaction(trans, root, 0, 1); 447 int ret;
448
449 ret = __btrfs_end_transaction(trans, root, 0, 1);
450 if (ret)
451 return ret;
452 return 0;
491} 453}
492 454
493int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, 455int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
494 struct btrfs_root *root) 456 struct btrfs_root *root)
495{ 457{
496 return __btrfs_end_transaction(trans, root, 1, 1); 458 int ret;
459
460 ret = __btrfs_end_transaction(trans, root, 1, 1);
461 if (ret)
462 return ret;
463 return 0;
497} 464}
498 465
499int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, 466int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
500 struct btrfs_root *root) 467 struct btrfs_root *root)
501{ 468{
502 return __btrfs_end_transaction(trans, root, 0, 0); 469 int ret;
470
471 ret = __btrfs_end_transaction(trans, root, 0, 0);
472 if (ret)
473 return ret;
474 return 0;
475}
476
477int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
478 struct btrfs_root *root)
479{
480 return __btrfs_end_transaction(trans, root, 1, 1);
503} 481}
504 482
505/* 483/*
@@ -760,8 +738,14 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
760 btrfs_update_reloc_root(trans, root); 738 btrfs_update_reloc_root(trans, root);
761 btrfs_orphan_commit_root(trans, root); 739 btrfs_orphan_commit_root(trans, root);
762 740
741 btrfs_save_ino_cache(root, trans);
742
763 if (root->commit_root != root->node) { 743 if (root->commit_root != root->node) {
744 mutex_lock(&root->fs_commit_mutex);
764 switch_commit_root(root); 745 switch_commit_root(root);
746 btrfs_unpin_free_ino(root);
747 mutex_unlock(&root->fs_commit_mutex);
748
765 btrfs_set_root_node(&root->root_item, 749 btrfs_set_root_node(&root->root_item,
766 root->node); 750 root->node);
767 } 751 }
@@ -809,97 +793,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
809 return ret; 793 return ret;
810} 794}
811 795
812#if 0
813/*
814 * when dropping snapshots, we generate a ton of delayed refs, and it makes
815 * sense not to join the transaction while it is trying to flush the current
816 * queue of delayed refs out.
817 *
818 * This is used by the drop snapshot code only
819 */
820static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
821{
822 DEFINE_WAIT(wait);
823
824 mutex_lock(&info->trans_mutex);
825 while (info->running_transaction &&
826 info->running_transaction->delayed_refs.flushing) {
827 prepare_to_wait(&info->transaction_wait, &wait,
828 TASK_UNINTERRUPTIBLE);
829 mutex_unlock(&info->trans_mutex);
830
831 schedule();
832
833 mutex_lock(&info->trans_mutex);
834 finish_wait(&info->transaction_wait, &wait);
835 }
836 mutex_unlock(&info->trans_mutex);
837 return 0;
838}
839
840/*
841 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
842 * all of them
843 */
844int btrfs_drop_dead_root(struct btrfs_root *root)
845{
846 struct btrfs_trans_handle *trans;
847 struct btrfs_root *tree_root = root->fs_info->tree_root;
848 unsigned long nr;
849 int ret;
850
851 while (1) {
852 /*
853 * we don't want to jump in and create a bunch of
854 * delayed refs if the transaction is starting to close
855 */
856 wait_transaction_pre_flush(tree_root->fs_info);
857 trans = btrfs_start_transaction(tree_root, 1);
858
859 /*
860 * we've joined a transaction, make sure it isn't
861 * closing right now
862 */
863 if (trans->transaction->delayed_refs.flushing) {
864 btrfs_end_transaction(trans, tree_root);
865 continue;
866 }
867
868 ret = btrfs_drop_snapshot(trans, root);
869 if (ret != -EAGAIN)
870 break;
871
872 ret = btrfs_update_root(trans, tree_root,
873 &root->root_key,
874 &root->root_item);
875 if (ret)
876 break;
877
878 nr = trans->blocks_used;
879 ret = btrfs_end_transaction(trans, tree_root);
880 BUG_ON(ret);
881
882 btrfs_btree_balance_dirty(tree_root, nr);
883 cond_resched();
884 }
885 BUG_ON(ret);
886
887 ret = btrfs_del_root(trans, tree_root, &root->root_key);
888 BUG_ON(ret);
889
890 nr = trans->blocks_used;
891 ret = btrfs_end_transaction(trans, tree_root);
892 BUG_ON(ret);
893
894 free_extent_buffer(root->node);
895 free_extent_buffer(root->commit_root);
896 kfree(root);
897
898 btrfs_btree_balance_dirty(tree_root, nr);
899 return ret;
900}
901#endif
902
903/* 796/*
904 * new snapshots need to be created at a very specific time in the 797 * new snapshots need to be created at a very specific time in the
905 * transaction commit. This does the actual creation 798 * transaction commit. This does the actual creation
@@ -930,7 +823,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
930 goto fail; 823 goto fail;
931 } 824 }
932 825
933 ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid); 826 ret = btrfs_find_free_objectid(tree_root, &objectid);
934 if (ret) { 827 if (ret) {
935 pending->error = ret; 828 pending->error = ret;
936 goto fail; 829 goto fail;
@@ -967,7 +860,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
967 BUG_ON(ret); 860 BUG_ON(ret);
968 ret = btrfs_insert_dir_item(trans, parent_root, 861 ret = btrfs_insert_dir_item(trans, parent_root,
969 dentry->d_name.name, dentry->d_name.len, 862 dentry->d_name.name, dentry->d_name.len,
970 parent_inode->i_ino, &key, 863 parent_inode, &key,
971 BTRFS_FT_DIR, index); 864 BTRFS_FT_DIR, index);
972 BUG_ON(ret); 865 BUG_ON(ret);
973 866
@@ -1009,7 +902,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1009 */ 902 */
1010 ret = btrfs_add_root_ref(trans, tree_root, objectid, 903 ret = btrfs_add_root_ref(trans, tree_root, objectid,
1011 parent_root->root_key.objectid, 904 parent_root->root_key.objectid,
1012 parent_inode->i_ino, index, 905 btrfs_ino(parent_inode), index,
1013 dentry->d_name.name, dentry->d_name.len); 906 dentry->d_name.name, dentry->d_name.len);
1014 BUG_ON(ret); 907 BUG_ON(ret);
1015 dput(parent); 908 dput(parent);
@@ -1037,6 +930,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
1037 int ret; 930 int ret;
1038 931
1039 list_for_each_entry(pending, head, list) { 932 list_for_each_entry(pending, head, list) {
933 /*
934 * We must deal with the delayed items before creating
 935 * snapshots, or we will create a snapshot with inconsistent
936 * information.
937 */
938 ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
939 BUG_ON(ret);
940
1040 ret = create_pending_snapshot(trans, fs_info, pending); 941 ret = create_pending_snapshot(trans, fs_info, pending);
1041 BUG_ON(ret); 942 BUG_ON(ret);
1042 } 943 }
@@ -1290,6 +1191,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1290 BUG_ON(ret); 1191 BUG_ON(ret);
1291 } 1192 }
1292 1193
1194 ret = btrfs_run_delayed_items(trans, root);
1195 BUG_ON(ret);
1196
1293 /* 1197 /*
1294 * rename don't use btrfs_join_transaction, so, once we 1198 * rename don't use btrfs_join_transaction, so, once we
1295 * set the transaction to blocked above, we aren't going 1199 * set the transaction to blocked above, we aren't going
@@ -1316,11 +1220,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1316 ret = create_pending_snapshots(trans, root->fs_info); 1220 ret = create_pending_snapshots(trans, root->fs_info);
1317 BUG_ON(ret); 1221 BUG_ON(ret);
1318 1222
1223 ret = btrfs_run_delayed_items(trans, root);
1224 BUG_ON(ret);
1225
1319 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 1226 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1320 BUG_ON(ret); 1227 BUG_ON(ret);
1321 1228
1322 WARN_ON(cur_trans != trans->transaction); 1229 WARN_ON(cur_trans != trans->transaction);
1323 1230
1231 btrfs_scrub_pause(root);
1324 /* btrfs_commit_tree_roots is responsible for getting the 1232 /* btrfs_commit_tree_roots is responsible for getting the
1325 * various roots consistent with each other. Every pointer 1233 * various roots consistent with each other. Every pointer
1326 * in the tree of tree roots has to point to the most up to date 1234 * in the tree of tree roots has to point to the most up to date
@@ -1405,6 +1313,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1405 1313
1406 mutex_unlock(&root->fs_info->trans_mutex); 1314 mutex_unlock(&root->fs_info->trans_mutex);
1407 1315
1316 btrfs_scrub_continue(root);
1317
1408 if (current->journal_info == trans) 1318 if (current->journal_info == trans)
1409 current->journal_info = NULL; 1319 current->journal_info = NULL;
1410 1320
@@ -1432,6 +1342,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
1432 root = list_entry(list.next, struct btrfs_root, root_list); 1342 root = list_entry(list.next, struct btrfs_root, root_list);
1433 list_del(&root->root_list); 1343 list_del(&root->root_list);
1434 1344
1345 btrfs_kill_all_delayed_nodes(root);
1346
1435 if (btrfs_header_backref_rev(root->node) < 1347 if (btrfs_header_backref_rev(root->node) <
1436 BTRFS_MIXED_BACKREF_REV) 1348 BTRFS_MIXED_BACKREF_REV)
1437 btrfs_drop_snapshot(root, NULL, 0); 1349 btrfs_drop_snapshot(root, NULL, 0);
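
The commit path now runs the delayed items twice: once before create_pending_snapshots(), so a snapshot sees fully applied inode metadata, and once after, because creating the snapshot itself queues new delayed items (for example the dir index entry in the parent). A hedged outline of that ordering follows; demo_commit_metadata is a made-up wrapper, while the helpers it calls are the ones visible in the hunks above:

	/* illustrative ordering only, not the real btrfs_commit_transaction */
	static int demo_commit_metadata(struct btrfs_trans_handle *trans,
					struct btrfs_root *root)
	{
		int ret;

		/* flush delayed inode updates before any snapshot is taken */
		ret = btrfs_run_delayed_items(trans, root);
		if (ret)
			return ret;

		ret = create_pending_snapshots(trans, root->fs_info);
		if (ret)
			return ret;

		/* snapshot creation queued more delayed items; flush again */
		ret = btrfs_run_delayed_items(trans, root);
		if (ret)
			return ret;

		return btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	}

Note that the patch itself still checks these calls with BUG_ON() rather than propagating errors; the sketch returns them only to keep the example self-contained.
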
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index e441acc6c584..804c88639e5d 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -101,11 +101,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
101int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); 101int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
102int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, 102int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
103 struct btrfs_root *root); 103 struct btrfs_root *root);
104int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
105 struct btrfs_root *root);
106 104
107int btrfs_add_dead_root(struct btrfs_root *root); 105int btrfs_add_dead_root(struct btrfs_root *root);
108int btrfs_drop_dead_root(struct btrfs_root *root);
109int btrfs_defrag_root(struct btrfs_root *root, int cacheonly); 106int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
110int btrfs_clean_old_snapshots(struct btrfs_root *root); 107int btrfs_clean_old_snapshots(struct btrfs_root *root);
111int btrfs_commit_transaction(struct btrfs_trans_handle *trans, 108int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
@@ -115,6 +112,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
115 int wait_for_unblock); 112 int wait_for_unblock);
116int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, 113int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
117 struct btrfs_root *root); 114 struct btrfs_root *root);
115int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
116 struct btrfs_root *root);
118int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, 117int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
119 struct btrfs_root *root); 118 struct btrfs_root *root);
120void btrfs_throttle(struct btrfs_root *root); 119void btrfs_throttle(struct btrfs_root *root);
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 992ab425599d..3b580ee8ab1d 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -97,7 +97,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
97 ret = 0; 97 ret = 0;
98 goto out; 98 goto out;
99 } 99 }
100 btrfs_release_path(root, path); 100 btrfs_release_path(path);
101 wret = btrfs_search_slot(trans, root, &key, path, 0, 1); 101 wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
102 102
103 if (wret < 0) { 103 if (wret < 0) {
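
Most of the tree-log.c hunks that follow are the same mechanical conversion seen here: btrfs_release_path() lost its root argument and now takes only the path. The usual search loop under the new API looks roughly like this sketch, where demo_scan_refs is a hypothetical wrapper and the key type/offset are chosen purely for illustration:

	/* walk INODE_REF items of one inode, releasing the path between searches */
	static int demo_scan_refs(struct btrfs_root *root, u64 ino)
	{
		struct btrfs_path *path;
		struct btrfs_key key;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		key.objectid = ino;
		key.type = BTRFS_INODE_REF_KEY;
		key.offset = 0;

		while (1) {
			ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
			if (ret < 0)
				break;
			if (ret > 0) {		/* no further exact matches */
				ret = 0;
				break;
			}

			/* ... examine path->nodes[0] / path->slots[0] here ... */

			/* new single-argument form: the root is no longer needed */
			btrfs_release_path(path);

			if (key.offset == (u64)-1)
				break;
			key.offset++;
		}

		btrfs_free_path(path);
		return ret;
	}
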
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f997ec0c1ba4..592396c6dc47 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -333,13 +333,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
333 goto insert; 333 goto insert;
334 334
335 if (item_size == 0) { 335 if (item_size == 0) {
336 btrfs_release_path(root, path); 336 btrfs_release_path(path);
337 return 0; 337 return 0;
338 } 338 }
339 dst_copy = kmalloc(item_size, GFP_NOFS); 339 dst_copy = kmalloc(item_size, GFP_NOFS);
340 src_copy = kmalloc(item_size, GFP_NOFS); 340 src_copy = kmalloc(item_size, GFP_NOFS);
341 if (!dst_copy || !src_copy) { 341 if (!dst_copy || !src_copy) {
342 btrfs_release_path(root, path); 342 btrfs_release_path(path);
343 kfree(dst_copy); 343 kfree(dst_copy);
344 kfree(src_copy); 344 kfree(src_copy);
345 return -ENOMEM; 345 return -ENOMEM;
@@ -361,13 +361,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
361 * sync 361 * sync
362 */ 362 */
363 if (ret == 0) { 363 if (ret == 0) {
364 btrfs_release_path(root, path); 364 btrfs_release_path(path);
365 return 0; 365 return 0;
366 } 366 }
367 367
368 } 368 }
369insert: 369insert:
370 btrfs_release_path(root, path); 370 btrfs_release_path(path);
371 /* try to insert the key into the destination tree */ 371 /* try to insert the key into the destination tree */
372 ret = btrfs_insert_empty_item(trans, root, path, 372 ret = btrfs_insert_empty_item(trans, root, path,
373 key, item_size); 373 key, item_size);
@@ -382,7 +382,6 @@ insert:
382 } else if (found_size < item_size) { 382 } else if (found_size < item_size) {
383 ret = btrfs_extend_item(trans, root, path, 383 ret = btrfs_extend_item(trans, root, path,
384 item_size - found_size); 384 item_size - found_size);
385 BUG_ON(ret);
386 } 385 }
387 } else if (ret) { 386 } else if (ret) {
388 return ret; 387 return ret;
@@ -438,7 +437,7 @@ insert:
438 } 437 }
439no_copy: 438no_copy:
440 btrfs_mark_buffer_dirty(path->nodes[0]); 439 btrfs_mark_buffer_dirty(path->nodes[0]);
441 btrfs_release_path(root, path); 440 btrfs_release_path(path);
442 return 0; 441 return 0;
443} 442}
444 443
@@ -519,7 +518,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
519 * file. This must be done before the btrfs_drop_extents run 518 * file. This must be done before the btrfs_drop_extents run
520 * so we don't try to drop this extent. 519 * so we don't try to drop this extent.
521 */ 520 */
522 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, 521 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
523 start, 0); 522 start, 0);
524 523
525 if (ret == 0 && 524 if (ret == 0 &&
@@ -544,11 +543,11 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
544 * we don't have to do anything 543 * we don't have to do anything
545 */ 544 */
546 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { 545 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
547 btrfs_release_path(root, path); 546 btrfs_release_path(path);
548 goto out; 547 goto out;
549 } 548 }
550 } 549 }
551 btrfs_release_path(root, path); 550 btrfs_release_path(path);
552 551
553 saved_nbytes = inode_get_bytes(inode); 552 saved_nbytes = inode_get_bytes(inode);
554 /* drop any overlapping extents */ 553 /* drop any overlapping extents */
@@ -590,6 +589,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
590 ins.objectid, ins.offset, 589 ins.objectid, ins.offset,
591 0, root->root_key.objectid, 590 0, root->root_key.objectid,
592 key->objectid, offset); 591 key->objectid, offset);
592 BUG_ON(ret);
593 } else { 593 } else {
594 /* 594 /*
595 * insert the extent pointer in the extent 595 * insert the extent pointer in the extent
@@ -600,7 +600,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
600 key->objectid, offset, &ins); 600 key->objectid, offset, &ins);
601 BUG_ON(ret); 601 BUG_ON(ret);
602 } 602 }
603 btrfs_release_path(root, path); 603 btrfs_release_path(path);
604 604
605 if (btrfs_file_extent_compression(eb, item)) { 605 if (btrfs_file_extent_compression(eb, item)) {
606 csum_start = ins.objectid; 606 csum_start = ins.objectid;
@@ -614,7 +614,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
614 614
615 ret = btrfs_lookup_csums_range(root->log_root, 615 ret = btrfs_lookup_csums_range(root->log_root,
616 csum_start, csum_end - 1, 616 csum_start, csum_end - 1,
617 &ordered_sums); 617 &ordered_sums, 0);
618 BUG_ON(ret); 618 BUG_ON(ret);
619 while (!list_empty(&ordered_sums)) { 619 while (!list_empty(&ordered_sums)) {
620 struct btrfs_ordered_sum *sums; 620 struct btrfs_ordered_sum *sums;
@@ -629,7 +629,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
629 kfree(sums); 629 kfree(sums);
630 } 630 }
631 } else { 631 } else {
632 btrfs_release_path(root, path); 632 btrfs_release_path(path);
633 } 633 }
634 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 634 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
635 /* inline extents are easy, we just overwrite them */ 635 /* inline extents are easy, we just overwrite them */
@@ -675,10 +675,13 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
675 return -ENOMEM; 675 return -ENOMEM;
676 676
677 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); 677 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
678 btrfs_release_path(root, path); 678 btrfs_release_path(path);
679 679
680 inode = read_one_inode(root, location.objectid); 680 inode = read_one_inode(root, location.objectid);
681 BUG_ON(!inode); 681 if (!inode) {
682 kfree(name);
683 return -EIO;
684 }
682 685
683 ret = link_to_fixup_dir(trans, root, path, location.objectid); 686 ret = link_to_fixup_dir(trans, root, path, location.objectid);
684 BUG_ON(ret); 687 BUG_ON(ret);
@@ -713,7 +716,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
713 goto out; 716 goto out;
714 } else 717 } else
715 goto out; 718 goto out;
716 btrfs_release_path(root, path); 719 btrfs_release_path(path);
717 720
718 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); 721 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
719 if (di && !IS_ERR(di)) { 722 if (di && !IS_ERR(di)) {
@@ -724,7 +727,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
724 goto out; 727 goto out;
725 match = 1; 728 match = 1;
726out: 729out:
727 btrfs_release_path(root, path); 730 btrfs_release_path(path);
728 return match; 731 return match;
729} 732}
730 733
@@ -817,7 +820,10 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
817 return -ENOENT; 820 return -ENOENT;
818 821
819 inode = read_one_inode(root, key->objectid); 822 inode = read_one_inode(root, key->objectid);
820 BUG_ON(!inode); 823 if (!inode) {
824 iput(dir);
825 return -EIO;
826 }
821 827
822 ref_ptr = btrfs_item_ptr_offset(eb, slot); 828 ref_ptr = btrfs_item_ptr_offset(eb, slot);
823 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); 829 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
@@ -832,7 +838,7 @@ again:
832 read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen); 838 read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
833 839
834 /* if we already have a perfect match, we're done */ 840 /* if we already have a perfect match, we're done */
835 if (inode_in_dir(root, path, dir->i_ino, inode->i_ino, 841 if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
836 btrfs_inode_ref_index(eb, ref), 842 btrfs_inode_ref_index(eb, ref),
837 name, namelen)) { 843 name, namelen)) {
838 goto out; 844 goto out;
@@ -884,7 +890,7 @@ again:
884 if (!backref_in_log(log, key, victim_name, 890 if (!backref_in_log(log, key, victim_name,
885 victim_name_len)) { 891 victim_name_len)) {
886 btrfs_inc_nlink(inode); 892 btrfs_inc_nlink(inode);
887 btrfs_release_path(root, path); 893 btrfs_release_path(path);
888 894
889 ret = btrfs_unlink_inode(trans, root, dir, 895 ret = btrfs_unlink_inode(trans, root, dir,
890 inode, victim_name, 896 inode, victim_name,
@@ -901,7 +907,7 @@ again:
901 */ 907 */
902 search_done = 1; 908 search_done = 1;
903 } 909 }
904 btrfs_release_path(root, path); 910 btrfs_release_path(path);
905 911
906insert: 912insert:
907 /* insert our name */ 913 /* insert our name */
@@ -922,7 +928,7 @@ out:
922 BUG_ON(ret); 928 BUG_ON(ret);
923 929
924out_nowrite: 930out_nowrite:
925 btrfs_release_path(root, path); 931 btrfs_release_path(path);
926 iput(dir); 932 iput(dir);
927 iput(inode); 933 iput(inode);
928 return 0; 934 return 0;
@@ -960,8 +966,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
960 unsigned long ptr; 966 unsigned long ptr;
961 unsigned long ptr_end; 967 unsigned long ptr_end;
962 int name_len; 968 int name_len;
969 u64 ino = btrfs_ino(inode);
963 970
964 key.objectid = inode->i_ino; 971 key.objectid = ino;
965 key.type = BTRFS_INODE_REF_KEY; 972 key.type = BTRFS_INODE_REF_KEY;
966 key.offset = (u64)-1; 973 key.offset = (u64)-1;
967 974
@@ -980,7 +987,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
980 } 987 }
981 btrfs_item_key_to_cpu(path->nodes[0], &key, 988 btrfs_item_key_to_cpu(path->nodes[0], &key,
982 path->slots[0]); 989 path->slots[0]);
983 if (key.objectid != inode->i_ino || 990 if (key.objectid != ino ||
984 key.type != BTRFS_INODE_REF_KEY) 991 key.type != BTRFS_INODE_REF_KEY)
985 break; 992 break;
986 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 993 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
@@ -999,9 +1006,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
999 if (key.offset == 0) 1006 if (key.offset == 0)
1000 break; 1007 break;
1001 key.offset--; 1008 key.offset--;
1002 btrfs_release_path(root, path); 1009 btrfs_release_path(path);
1003 } 1010 }
1004 btrfs_release_path(root, path); 1011 btrfs_release_path(path);
1005 if (nlink != inode->i_nlink) { 1012 if (nlink != inode->i_nlink) {
1006 inode->i_nlink = nlink; 1013 inode->i_nlink = nlink;
1007 btrfs_update_inode(trans, root, inode); 1014 btrfs_update_inode(trans, root, inode);
@@ -1011,10 +1018,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1011 if (inode->i_nlink == 0) { 1018 if (inode->i_nlink == 0) {
1012 if (S_ISDIR(inode->i_mode)) { 1019 if (S_ISDIR(inode->i_mode)) {
1013 ret = replay_dir_deletes(trans, root, NULL, path, 1020 ret = replay_dir_deletes(trans, root, NULL, path,
1014 inode->i_ino, 1); 1021 ino, 1);
1015 BUG_ON(ret); 1022 BUG_ON(ret);
1016 } 1023 }
1017 ret = insert_orphan_item(trans, root, inode->i_ino); 1024 ret = insert_orphan_item(trans, root, ino);
1018 BUG_ON(ret); 1025 BUG_ON(ret);
1019 } 1026 }
1020 btrfs_free_path(path); 1027 btrfs_free_path(path);
@@ -1050,11 +1057,13 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1050 break; 1057 break;
1051 1058
1052 ret = btrfs_del_item(trans, root, path); 1059 ret = btrfs_del_item(trans, root, path);
1053 BUG_ON(ret); 1060 if (ret)
1061 goto out;
1054 1062
1055 btrfs_release_path(root, path); 1063 btrfs_release_path(path);
1056 inode = read_one_inode(root, key.offset); 1064 inode = read_one_inode(root, key.offset);
1057 BUG_ON(!inode); 1065 if (!inode)
1066 return -EIO;
1058 1067
1059 ret = fixup_inode_link_count(trans, root, inode); 1068 ret = fixup_inode_link_count(trans, root, inode);
1060 BUG_ON(ret); 1069 BUG_ON(ret);
@@ -1068,8 +1077,10 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1068 */ 1077 */
1069 key.offset = (u64)-1; 1078 key.offset = (u64)-1;
1070 } 1079 }
1071 btrfs_release_path(root, path); 1080 ret = 0;
1072 return 0; 1081out:
1082 btrfs_release_path(path);
1083 return ret;
1073} 1084}
1074 1085
1075 1086
@@ -1088,7 +1099,8 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1088 struct inode *inode; 1099 struct inode *inode;
1089 1100
1090 inode = read_one_inode(root, objectid); 1101 inode = read_one_inode(root, objectid);
1091 BUG_ON(!inode); 1102 if (!inode)
1103 return -EIO;
1092 1104
1093 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1105 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1094 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); 1106 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
@@ -1096,7 +1108,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1096 1108
1097 ret = btrfs_insert_empty_item(trans, root, path, &key, 0); 1109 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1098 1110
1099 btrfs_release_path(root, path); 1111 btrfs_release_path(path);
1100 if (ret == 0) { 1112 if (ret == 0) {
1101 btrfs_inc_nlink(inode); 1113 btrfs_inc_nlink(inode);
1102 btrfs_update_inode(trans, root, inode); 1114 btrfs_update_inode(trans, root, inode);
@@ -1175,7 +1187,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1175 int ret; 1187 int ret;
1176 1188
1177 dir = read_one_inode(root, key->objectid); 1189 dir = read_one_inode(root, key->objectid);
1178 BUG_ON(!dir); 1190 if (!dir)
1191 return -EIO;
1179 1192
1180 name_len = btrfs_dir_name_len(eb, di); 1193 name_len = btrfs_dir_name_len(eb, di);
1181 name = kmalloc(name_len, GFP_NOFS); 1194 name = kmalloc(name_len, GFP_NOFS);
@@ -1192,7 +1205,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1192 exists = 1; 1205 exists = 1;
1193 else 1206 else
1194 exists = 0; 1207 exists = 0;
1195 btrfs_release_path(root, path); 1208 btrfs_release_path(path);
1196 1209
1197 if (key->type == BTRFS_DIR_ITEM_KEY) { 1210 if (key->type == BTRFS_DIR_ITEM_KEY) {
1198 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, 1211 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
@@ -1205,7 +1218,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1205 } else { 1218 } else {
1206 BUG(); 1219 BUG();
1207 } 1220 }
1208 if (!dst_di || IS_ERR(dst_di)) { 1221 if (IS_ERR_OR_NULL(dst_di)) {
1209 /* we need a sequence number to insert, so we only 1222 /* we need a sequence number to insert, so we only
1210 * do inserts for the BTRFS_DIR_INDEX_KEY types 1223 * do inserts for the BTRFS_DIR_INDEX_KEY types
1211 */ 1224 */
@@ -1236,13 +1249,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1236 if (key->type == BTRFS_DIR_INDEX_KEY) 1249 if (key->type == BTRFS_DIR_INDEX_KEY)
1237 goto insert; 1250 goto insert;
1238out: 1251out:
1239 btrfs_release_path(root, path); 1252 btrfs_release_path(path);
1240 kfree(name); 1253 kfree(name);
1241 iput(dir); 1254 iput(dir);
1242 return 0; 1255 return 0;
1243 1256
1244insert: 1257insert:
1245 btrfs_release_path(root, path); 1258 btrfs_release_path(path);
1246 ret = insert_one_name(trans, root, path, key->objectid, key->offset, 1259 ret = insert_one_name(trans, root, path, key->objectid, key->offset,
1247 name, name_len, log_type, &log_key); 1260 name, name_len, log_type, &log_key);
1248 1261
@@ -1363,7 +1376,7 @@ next:
1363 *end_ret = found_end; 1376 *end_ret = found_end;
1364 ret = 0; 1377 ret = 0;
1365out: 1378out:
1366 btrfs_release_path(root, path); 1379 btrfs_release_path(path);
1367 return ret; 1380 return ret;
1368} 1381}
1369 1382
@@ -1426,12 +1439,15 @@ again:
1426 dir_key->offset, 1439 dir_key->offset,
1427 name, name_len, 0); 1440 name, name_len, 0);
1428 } 1441 }
1429 if (!log_di || IS_ERR(log_di)) { 1442 if (IS_ERR_OR_NULL(log_di)) {
1430 btrfs_dir_item_key_to_cpu(eb, di, &location); 1443 btrfs_dir_item_key_to_cpu(eb, di, &location);
1431 btrfs_release_path(root, path); 1444 btrfs_release_path(path);
1432 btrfs_release_path(log, log_path); 1445 btrfs_release_path(log_path);
1433 inode = read_one_inode(root, location.objectid); 1446 inode = read_one_inode(root, location.objectid);
1434 BUG_ON(!inode); 1447 if (!inode) {
1448 kfree(name);
1449 return -EIO;
1450 }
1435 1451
1436 ret = link_to_fixup_dir(trans, root, 1452 ret = link_to_fixup_dir(trans, root,
1437 path, location.objectid); 1453 path, location.objectid);
@@ -1453,7 +1469,7 @@ again:
1453 ret = 0; 1469 ret = 0;
1454 goto out; 1470 goto out;
1455 } 1471 }
1456 btrfs_release_path(log, log_path); 1472 btrfs_release_path(log_path);
1457 kfree(name); 1473 kfree(name);
1458 1474
1459 ptr = (unsigned long)(di + 1); 1475 ptr = (unsigned long)(di + 1);
@@ -1461,8 +1477,8 @@ again:
1461 } 1477 }
1462 ret = 0; 1478 ret = 0;
1463out: 1479out:
1464 btrfs_release_path(root, path); 1480 btrfs_release_path(path);
1465 btrfs_release_path(log, log_path); 1481 btrfs_release_path(log_path);
1466 return ret; 1482 return ret;
1467} 1483}
1468 1484
@@ -1550,7 +1566,7 @@ again:
1550 break; 1566 break;
1551 dir_key.offset = found_key.offset + 1; 1567 dir_key.offset = found_key.offset + 1;
1552 } 1568 }
1553 btrfs_release_path(root, path); 1569 btrfs_release_path(path);
1554 if (range_end == (u64)-1) 1570 if (range_end == (u64)-1)
1555 break; 1571 break;
1556 range_start = range_end + 1; 1572 range_start = range_end + 1;
@@ -1561,11 +1577,11 @@ next_type:
1561 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { 1577 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
1562 key_type = BTRFS_DIR_LOG_INDEX_KEY; 1578 key_type = BTRFS_DIR_LOG_INDEX_KEY;
1563 dir_key.type = BTRFS_DIR_INDEX_KEY; 1579 dir_key.type = BTRFS_DIR_INDEX_KEY;
1564 btrfs_release_path(root, path); 1580 btrfs_release_path(path);
1565 goto again; 1581 goto again;
1566 } 1582 }
1567out: 1583out:
1568 btrfs_release_path(root, path); 1584 btrfs_release_path(path);
1569 btrfs_free_path(log_path); 1585 btrfs_free_path(log_path);
1570 iput(dir); 1586 iput(dir);
1571 return ret; 1587 return ret;
@@ -2093,7 +2109,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2093 * the running transaction open, so a full commit can't hop 2109 * the running transaction open, so a full commit can't hop
2094 * in and cause problems either. 2110 * in and cause problems either.
2095 */ 2111 */
2112 btrfs_scrub_pause_super(root);
2096 write_ctree_super(trans, root->fs_info->tree_root, 1); 2113 write_ctree_super(trans, root->fs_info->tree_root, 1);
2114 btrfs_scrub_continue_super(root);
2097 ret = 0; 2115 ret = 0;
2098 2116
2099 mutex_lock(&root->log_mutex); 2117 mutex_lock(&root->log_mutex);
@@ -2197,6 +2215,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2197 int ret; 2215 int ret;
2198 int err = 0; 2216 int err = 0;
2199 int bytes_del = 0; 2217 int bytes_del = 0;
2218 u64 dir_ino = btrfs_ino(dir);
2200 2219
2201 if (BTRFS_I(dir)->logged_trans < trans->transid) 2220 if (BTRFS_I(dir)->logged_trans < trans->transid)
2202 return 0; 2221 return 0;
@@ -2214,7 +2233,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2214 goto out_unlock; 2233 goto out_unlock;
2215 } 2234 }
2216 2235
2217 di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, 2236 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2218 name, name_len, -1); 2237 name, name_len, -1);
2219 if (IS_ERR(di)) { 2238 if (IS_ERR(di)) {
2220 err = PTR_ERR(di); 2239 err = PTR_ERR(di);
@@ -2225,8 +2244,8 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2225 bytes_del += name_len; 2244 bytes_del += name_len;
2226 BUG_ON(ret); 2245 BUG_ON(ret);
2227 } 2246 }
2228 btrfs_release_path(log, path); 2247 btrfs_release_path(path);
2229 di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, 2248 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2230 index, name, name_len, -1); 2249 index, name, name_len, -1);
2231 if (IS_ERR(di)) { 2250 if (IS_ERR(di)) {
2232 err = PTR_ERR(di); 2251 err = PTR_ERR(di);
@@ -2244,10 +2263,10 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2244 if (bytes_del) { 2263 if (bytes_del) {
2245 struct btrfs_key key; 2264 struct btrfs_key key;
2246 2265
2247 key.objectid = dir->i_ino; 2266 key.objectid = dir_ino;
2248 key.offset = 0; 2267 key.offset = 0;
2249 key.type = BTRFS_INODE_ITEM_KEY; 2268 key.type = BTRFS_INODE_ITEM_KEY;
2250 btrfs_release_path(log, path); 2269 btrfs_release_path(path);
2251 2270
2252 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); 2271 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2253 if (ret < 0) { 2272 if (ret < 0) {
@@ -2269,7 +2288,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2269 btrfs_mark_buffer_dirty(path->nodes[0]); 2288 btrfs_mark_buffer_dirty(path->nodes[0]);
2270 } else 2289 } else
2271 ret = 0; 2290 ret = 0;
2272 btrfs_release_path(log, path); 2291 btrfs_release_path(path);
2273 } 2292 }
2274fail: 2293fail:
2275 btrfs_free_path(path); 2294 btrfs_free_path(path);
@@ -2303,7 +2322,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2303 log = root->log_root; 2322 log = root->log_root;
2304 mutex_lock(&BTRFS_I(inode)->log_mutex); 2323 mutex_lock(&BTRFS_I(inode)->log_mutex);
2305 2324
2306 ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino, 2325 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2307 dirid, &index); 2326 dirid, &index);
2308 mutex_unlock(&BTRFS_I(inode)->log_mutex); 2327 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2309 if (ret == -ENOSPC) { 2328 if (ret == -ENOSPC) {
@@ -2344,7 +2363,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2344 struct btrfs_dir_log_item); 2363 struct btrfs_dir_log_item);
2345 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 2364 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
2346 btrfs_mark_buffer_dirty(path->nodes[0]); 2365 btrfs_mark_buffer_dirty(path->nodes[0]);
2347 btrfs_release_path(log, path); 2366 btrfs_release_path(path);
2348 return 0; 2367 return 0;
2349} 2368}
2350 2369
@@ -2369,13 +2388,14 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2369 int nritems; 2388 int nritems;
2370 u64 first_offset = min_offset; 2389 u64 first_offset = min_offset;
2371 u64 last_offset = (u64)-1; 2390 u64 last_offset = (u64)-1;
2391 u64 ino = btrfs_ino(inode);
2372 2392
2373 log = root->log_root; 2393 log = root->log_root;
2374 max_key.objectid = inode->i_ino; 2394 max_key.objectid = ino;
2375 max_key.offset = (u64)-1; 2395 max_key.offset = (u64)-1;
2376 max_key.type = key_type; 2396 max_key.type = key_type;
2377 2397
2378 min_key.objectid = inode->i_ino; 2398 min_key.objectid = ino;
2379 min_key.type = key_type; 2399 min_key.type = key_type;
2380 min_key.offset = min_offset; 2400 min_key.offset = min_offset;
2381 2401
@@ -2388,18 +2408,17 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2388 * we didn't find anything from this transaction, see if there 2408 * we didn't find anything from this transaction, see if there
2389 * is anything at all 2409 * is anything at all
2390 */ 2410 */
2391 if (ret != 0 || min_key.objectid != inode->i_ino || 2411 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
2392 min_key.type != key_type) { 2412 min_key.objectid = ino;
2393 min_key.objectid = inode->i_ino;
2394 min_key.type = key_type; 2413 min_key.type = key_type;
2395 min_key.offset = (u64)-1; 2414 min_key.offset = (u64)-1;
2396 btrfs_release_path(root, path); 2415 btrfs_release_path(path);
2397 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 2416 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2398 if (ret < 0) { 2417 if (ret < 0) {
2399 btrfs_release_path(root, path); 2418 btrfs_release_path(path);
2400 return ret; 2419 return ret;
2401 } 2420 }
2402 ret = btrfs_previous_item(root, path, inode->i_ino, key_type); 2421 ret = btrfs_previous_item(root, path, ino, key_type);
2403 2422
2404 /* if ret == 0 there are items for this type, 2423 /* if ret == 0 there are items for this type,
2405 * create a range to tell us the last key of this type. 2424 * create a range to tell us the last key of this type.
@@ -2417,7 +2436,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2417 } 2436 }
2418 2437
2419 /* go backward to find any previous key */ 2438 /* go backward to find any previous key */
2420 ret = btrfs_previous_item(root, path, inode->i_ino, key_type); 2439 ret = btrfs_previous_item(root, path, ino, key_type);
2421 if (ret == 0) { 2440 if (ret == 0) {
2422 struct btrfs_key tmp; 2441 struct btrfs_key tmp;
2423 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 2442 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
@@ -2432,7 +2451,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2432 } 2451 }
2433 } 2452 }
2434 } 2453 }
2435 btrfs_release_path(root, path); 2454 btrfs_release_path(path);
2436 2455
2437 /* find the first key from this transaction again */ 2456 /* find the first key from this transaction again */
2438 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 2457 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
@@ -2452,8 +2471,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2452 for (i = path->slots[0]; i < nritems; i++) { 2471 for (i = path->slots[0]; i < nritems; i++) {
2453 btrfs_item_key_to_cpu(src, &min_key, i); 2472 btrfs_item_key_to_cpu(src, &min_key, i);
2454 2473
2455 if (min_key.objectid != inode->i_ino || 2474 if (min_key.objectid != ino || min_key.type != key_type)
2456 min_key.type != key_type)
2457 goto done; 2475 goto done;
2458 ret = overwrite_item(trans, log, dst_path, src, i, 2476 ret = overwrite_item(trans, log, dst_path, src, i,
2459 &min_key); 2477 &min_key);
@@ -2474,7 +2492,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2474 goto done; 2492 goto done;
2475 } 2493 }
2476 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 2494 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2477 if (tmp.objectid != inode->i_ino || tmp.type != key_type) { 2495 if (tmp.objectid != ino || tmp.type != key_type) {
2478 last_offset = (u64)-1; 2496 last_offset = (u64)-1;
2479 goto done; 2497 goto done;
2480 } 2498 }
@@ -2490,8 +2508,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2490 } 2508 }
2491 } 2509 }
2492done: 2510done:
2493 btrfs_release_path(root, path); 2511 btrfs_release_path(path);
2494 btrfs_release_path(log, dst_path); 2512 btrfs_release_path(dst_path);
2495 2513
2496 if (err == 0) { 2514 if (err == 0) {
2497 *last_offset_ret = last_offset; 2515 *last_offset_ret = last_offset;
@@ -2500,8 +2518,7 @@ done:
2500 * is valid 2518 * is valid
2501 */ 2519 */
2502 ret = insert_dir_log_key(trans, log, path, key_type, 2520 ret = insert_dir_log_key(trans, log, path, key_type,
2503 inode->i_ino, first_offset, 2521 ino, first_offset, last_offset);
2504 last_offset);
2505 if (ret) 2522 if (ret)
2506 err = ret; 2523 err = ret;
2507 } 2524 }
@@ -2587,10 +2604,11 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
2587 break; 2604 break;
2588 2605
2589 ret = btrfs_del_item(trans, log, path); 2606 ret = btrfs_del_item(trans, log, path);
2590 BUG_ON(ret); 2607 if (ret)
2591 btrfs_release_path(log, path); 2608 break;
2609 btrfs_release_path(path);
2592 } 2610 }
2593 btrfs_release_path(log, path); 2611 btrfs_release_path(path);
2594 return ret; 2612 return ret;
2595} 2613}
2596 2614
@@ -2665,6 +2683,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
2665 extent = btrfs_item_ptr(src, start_slot + i, 2683 extent = btrfs_item_ptr(src, start_slot + i,
2666 struct btrfs_file_extent_item); 2684 struct btrfs_file_extent_item);
2667 2685
2686 if (btrfs_file_extent_generation(src, extent) < trans->transid)
2687 continue;
2688
2668 found_type = btrfs_file_extent_type(src, extent); 2689 found_type = btrfs_file_extent_type(src, extent);
2669 if (found_type == BTRFS_FILE_EXTENT_REG || 2690 if (found_type == BTRFS_FILE_EXTENT_REG ||
2670 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 2691 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
@@ -2689,14 +2710,14 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
2689 ret = btrfs_lookup_csums_range( 2710 ret = btrfs_lookup_csums_range(
2690 log->fs_info->csum_root, 2711 log->fs_info->csum_root,
2691 ds + cs, ds + cs + cl - 1, 2712 ds + cs, ds + cs + cl - 1,
2692 &ordered_sums); 2713 &ordered_sums, 0);
2693 BUG_ON(ret); 2714 BUG_ON(ret);
2694 } 2715 }
2695 } 2716 }
2696 } 2717 }
2697 2718
2698 btrfs_mark_buffer_dirty(dst_path->nodes[0]); 2719 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
2699 btrfs_release_path(log, dst_path); 2720 btrfs_release_path(dst_path);
2700 kfree(ins_data); 2721 kfree(ins_data);
2701 2722
2702 /* 2723 /*
@@ -2745,6 +2766,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2745 int nritems; 2766 int nritems;
2746 int ins_start_slot = 0; 2767 int ins_start_slot = 0;
2747 int ins_nr; 2768 int ins_nr;
2769 u64 ino = btrfs_ino(inode);
2748 2770
2749 log = root->log_root; 2771 log = root->log_root;
2750 2772
@@ -2757,11 +2779,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2757 return -ENOMEM; 2779 return -ENOMEM;
2758 } 2780 }
2759 2781
2760 min_key.objectid = inode->i_ino; 2782 min_key.objectid = ino;
2761 min_key.type = BTRFS_INODE_ITEM_KEY; 2783 min_key.type = BTRFS_INODE_ITEM_KEY;
2762 min_key.offset = 0; 2784 min_key.offset = 0;
2763 2785
2764 max_key.objectid = inode->i_ino; 2786 max_key.objectid = ino;
2765 2787
2766 /* today the code can only do partial logging of directories */ 2788 /* today the code can only do partial logging of directories */
2767 if (!S_ISDIR(inode->i_mode)) 2789 if (!S_ISDIR(inode->i_mode))
@@ -2773,6 +2795,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2773 max_key.type = (u8)-1; 2795 max_key.type = (u8)-1;
2774 max_key.offset = (u64)-1; 2796 max_key.offset = (u64)-1;
2775 2797
2798 ret = btrfs_commit_inode_delayed_items(trans, inode);
2799 if (ret) {
2800 btrfs_free_path(path);
2801 btrfs_free_path(dst_path);
2802 return ret;
2803 }
2804
2776 mutex_lock(&BTRFS_I(inode)->log_mutex); 2805 mutex_lock(&BTRFS_I(inode)->log_mutex);
2777 2806
2778 /* 2807 /*
@@ -2784,8 +2813,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2784 2813
2785 if (inode_only == LOG_INODE_EXISTS) 2814 if (inode_only == LOG_INODE_EXISTS)
2786 max_key_type = BTRFS_XATTR_ITEM_KEY; 2815 max_key_type = BTRFS_XATTR_ITEM_KEY;
2787 ret = drop_objectid_items(trans, log, path, 2816 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
2788 inode->i_ino, max_key_type);
2789 } else { 2817 } else {
2790 ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); 2818 ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
2791 } 2819 }
@@ -2803,7 +2831,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2803 break; 2831 break;
2804again: 2832again:
2805 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 2833 /* note, ins_nr might be > 0 here, cleanup outside the loop */
2806 if (min_key.objectid != inode->i_ino) 2834 if (min_key.objectid != ino)
2807 break; 2835 break;
2808 if (min_key.type > max_key.type) 2836 if (min_key.type > max_key.type)
2809 break; 2837 break;
@@ -2845,7 +2873,7 @@ next_slot:
2845 } 2873 }
2846 ins_nr = 0; 2874 ins_nr = 0;
2847 } 2875 }
2848 btrfs_release_path(root, path); 2876 btrfs_release_path(path);
2849 2877
2850 if (min_key.offset < (u64)-1) 2878 if (min_key.offset < (u64)-1)
2851 min_key.offset++; 2879 min_key.offset++;
@@ -2868,8 +2896,8 @@ next_slot:
2868 } 2896 }
2869 WARN_ON(ins_nr); 2897 WARN_ON(ins_nr);
2870 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { 2898 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
2871 btrfs_release_path(root, path); 2899 btrfs_release_path(path);
2872 btrfs_release_path(log, dst_path); 2900 btrfs_release_path(dst_path);
2873 ret = log_directory_changes(trans, root, inode, path, dst_path); 2901 ret = log_directory_changes(trans, root, inode, path, dst_path);
2874 if (ret) { 2902 if (ret) {
2875 err = ret; 2903 err = ret;
@@ -3136,7 +3164,7 @@ again:
3136 } 3164 }
3137 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 3165 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3138 path->slots[0]); 3166 path->slots[0]);
3139 btrfs_release_path(log_root_tree, path); 3167 btrfs_release_path(path);
3140 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) 3168 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
3141 break; 3169 break;
3142 3170
@@ -3171,7 +3199,7 @@ again:
3171 if (found_key.offset == 0) 3199 if (found_key.offset == 0)
3172 break; 3200 break;
3173 } 3201 }
3174 btrfs_release_path(log_root_tree, path); 3202 btrfs_release_path(path);
3175 3203
3176 /* step one is to pin it all, step two is to replay just inodes */ 3204 /* step one is to pin it all, step two is to replay just inodes */
3177 if (wc.pin) { 3205 if (wc.pin) {
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 3dfae84c8cc8..2270ac58d746 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -38,7 +38,6 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
38 struct btrfs_root *root, 38 struct btrfs_root *root,
39 const char *name, int name_len, 39 const char *name, int name_len,
40 struct inode *inode, u64 dirid); 40 struct inode *inode, u64 dirid);
41int btrfs_join_running_log_trans(struct btrfs_root *root);
42int btrfs_end_log_trans(struct btrfs_root *root); 41int btrfs_end_log_trans(struct btrfs_root *root);
43int btrfs_pin_log_trans(struct btrfs_root *root); 42int btrfs_pin_log_trans(struct btrfs_root *root);
44int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 43int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/version.sh b/fs/btrfs/version.sh
deleted file mode 100644
index 1ca1952fd917..000000000000
--- a/fs/btrfs/version.sh
+++ /dev/null
@@ -1,43 +0,0 @@
1#!/bin/bash
2#
3# determine-version -- report a useful version for releases
4#
5# Copyright 2008, Aron Griffis <agriffis@n01se.net>
6# Copyright 2008, Oracle
7# Released under the GNU GPLv2
8
9v="v0.16"
10
11which git &> /dev/null
12if [ $? == 0 ]; then
13 git branch >& /dev/null
14 if [ $? == 0 ]; then
15 if head=`git rev-parse --verify HEAD 2>/dev/null`; then
16 if tag=`git describe --tags 2>/dev/null`; then
17 v="$tag"
18 fi
19
20 # Are there uncommitted changes?
21 git update-index --refresh --unmerged > /dev/null
22 if git diff-index --name-only HEAD | \
23 grep -v "^scripts/package" \
24 | read dummy; then
25 v="$v"-dirty
26 fi
27 fi
28 fi
29fi
30
31echo "#ifndef __BUILD_VERSION" > .build-version.h
32echo "#define __BUILD_VERSION" >> .build-version.h
33echo "#define BTRFS_BUILD_VERSION \"Btrfs $v\"" >> .build-version.h
34echo "#endif" >> .build-version.h
35
36diff -q version.h .build-version.h >& /dev/null
37
38if [ $? == 0 ]; then
39 rm .build-version.h
40 exit 0
41fi
42
43mv .build-version.h version.h
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c7367ae5a3e6..c48214ef5c09 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -38,22 +38,9 @@ static int init_first_rw_device(struct btrfs_trans_handle *trans,
38 struct btrfs_device *device); 38 struct btrfs_device *device);
39static int btrfs_relocate_sys_chunks(struct btrfs_root *root); 39static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
40 40
41#define map_lookup_size(n) (sizeof(struct map_lookup) + \
42 (sizeof(struct btrfs_bio_stripe) * (n)))
43
44static DEFINE_MUTEX(uuid_mutex); 41static DEFINE_MUTEX(uuid_mutex);
45static LIST_HEAD(fs_uuids); 42static LIST_HEAD(fs_uuids);
46 43
47void btrfs_lock_volumes(void)
48{
49 mutex_lock(&uuid_mutex);
50}
51
52void btrfs_unlock_volumes(void)
53{
54 mutex_unlock(&uuid_mutex);
55}
56
57static void lock_chunks(struct btrfs_root *root) 44static void lock_chunks(struct btrfs_root *root)
58{ 45{
59 mutex_lock(&root->fs_info->chunk_mutex); 46 mutex_lock(&root->fs_info->chunk_mutex);
@@ -363,7 +350,7 @@ static noinline int device_list_add(const char *path,
363 INIT_LIST_HEAD(&device->dev_alloc_list); 350 INIT_LIST_HEAD(&device->dev_alloc_list);
364 351
365 mutex_lock(&fs_devices->device_list_mutex); 352 mutex_lock(&fs_devices->device_list_mutex);
366 list_add(&device->dev_list, &fs_devices->devices); 353 list_add_rcu(&device->dev_list, &fs_devices->devices);
367 mutex_unlock(&fs_devices->device_list_mutex); 354 mutex_unlock(&fs_devices->device_list_mutex);
368 355
369 device->fs_devices = fs_devices; 356 device->fs_devices = fs_devices;
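device_list_add() now publishes new devices with list_add_rcu(), so lockless readers that walk fs_devices->devices inside an RCU read-side critical section never observe a half-linked entry. A compact kernel-style sketch of that publish/read split, assuming a kernel build tree; the struct and function names are illustrative, not btrfs's:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_dev {
	struct list_head list;
	int id;
};

static LIST_HEAD(demo_devices);
static DEFINE_MUTEX(demo_lock);		/* serializes writers only */

/* writer: fully initialize the entry, then publish it */
static int demo_add(int id)
{
	struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;
	d->id = id;
	mutex_lock(&demo_lock);
	list_add_rcu(&d->list, &demo_devices);
	mutex_unlock(&demo_lock);
	return 0;
}

/* reader: no mutex, just an RCU read-side critical section */
static bool demo_lookup(int id)
{
	struct demo_dev *d;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(d, &demo_devices, list) {
		if (d->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}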
@@ -406,7 +393,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
406 fs_devices->latest_trans = orig->latest_trans; 393 fs_devices->latest_trans = orig->latest_trans;
407 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid)); 394 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
408 395
409 mutex_lock(&orig->device_list_mutex); 396 /* We hold the volume lock, so it is safe to read the devices. */
410 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 397 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
411 device = kzalloc(sizeof(*device), GFP_NOFS); 398 device = kzalloc(sizeof(*device), GFP_NOFS);
412 if (!device) 399 if (!device)
@@ -429,10 +416,8 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
429 device->fs_devices = fs_devices; 416 device->fs_devices = fs_devices;
430 fs_devices->num_devices++; 417 fs_devices->num_devices++;
431 } 418 }
432 mutex_unlock(&orig->device_list_mutex);
433 return fs_devices; 419 return fs_devices;
434error: 420error:
435 mutex_unlock(&orig->device_list_mutex);
436 free_fs_devices(fs_devices); 421 free_fs_devices(fs_devices);
437 return ERR_PTR(-ENOMEM); 422 return ERR_PTR(-ENOMEM);
438} 423}
@@ -443,7 +428,7 @@ int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
443 428
444 mutex_lock(&uuid_mutex); 429 mutex_lock(&uuid_mutex);
445again: 430again:
446 mutex_lock(&fs_devices->device_list_mutex); 431 /* This is the initialization path, so it is safe to release the devices. */
447 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 432 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
448 if (device->in_fs_metadata) 433 if (device->in_fs_metadata)
449 continue; 434 continue;
@@ -463,7 +448,6 @@ again:
463 kfree(device->name); 448 kfree(device->name);
464 kfree(device); 449 kfree(device);
465 } 450 }
466 mutex_unlock(&fs_devices->device_list_mutex);
467 451
468 if (fs_devices->seed) { 452 if (fs_devices->seed) {
469 fs_devices = fs_devices->seed; 453 fs_devices = fs_devices->seed;
@@ -474,6 +458,29 @@ again:
474 return 0; 458 return 0;
475} 459}
476 460
461static void __free_device(struct work_struct *work)
462{
463 struct btrfs_device *device;
464
465 device = container_of(work, struct btrfs_device, rcu_work);
466
467 if (device->bdev)
468 blkdev_put(device->bdev, device->mode);
469
470 kfree(device->name);
471 kfree(device);
472}
473
474static void free_device(struct rcu_head *head)
475{
476 struct btrfs_device *device;
477
478 device = container_of(head, struct btrfs_device, rcu);
479
480 INIT_WORK(&device->rcu_work, __free_device);
481 schedule_work(&device->rcu_work);
482}
483
477static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 484static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
478{ 485{
479 struct btrfs_device *device; 486 struct btrfs_device *device;
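The new free_device() above cannot release the block device directly: call_rcu() callbacks run after the grace period in softirq context, where blkdev_put() (which may sleep) is not allowed, so the callback only queues a work item and __free_device() does the blocking cleanup from process context. A hedged sketch of that two-stage teardown, assuming a kernel build tree; names are illustrative:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_res {
	char *name;			/* needs kfree() */
	struct rcu_head rcu;		/* stage 1: wait for readers */
	struct work_struct work;	/* stage 2: blocking cleanup */
};

/* runs in process context: safe to sleep, take mutexes, do I/O */
static void demo_release_work(struct work_struct *work)
{
	struct demo_res *r = container_of(work, struct demo_res, work);

	/* blocking teardown (the blkdev_put() equivalent) would go here */
	kfree(r->name);
	kfree(r);
}

/* runs from softirq after the RCU grace period: must not sleep */
static void demo_release_rcu(struct rcu_head *head)
{
	struct demo_res *r = container_of(head, struct demo_res, rcu);

	INIT_WORK(&r->work, demo_release_work);
	schedule_work(&r->work);
}

/* caller unlinks r from every RCU-visible list first, then drops it */
static void demo_put(struct demo_res *r)
{
	call_rcu(&r->rcu, demo_release_rcu);
}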
@@ -481,20 +488,32 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
481 if (--fs_devices->opened > 0) 488 if (--fs_devices->opened > 0)
482 return 0; 489 return 0;
483 490
491 mutex_lock(&fs_devices->device_list_mutex);
484 list_for_each_entry(device, &fs_devices->devices, dev_list) { 492 list_for_each_entry(device, &fs_devices->devices, dev_list) {
485 if (device->bdev) { 493 struct btrfs_device *new_device;
486 blkdev_put(device->bdev, device->mode); 494
495 if (device->bdev)
487 fs_devices->open_devices--; 496 fs_devices->open_devices--;
488 } 497
489 if (device->writeable) { 498 if (device->writeable) {
490 list_del_init(&device->dev_alloc_list); 499 list_del_init(&device->dev_alloc_list);
491 fs_devices->rw_devices--; 500 fs_devices->rw_devices--;
492 } 501 }
493 502
494 device->bdev = NULL; 503 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
495 device->writeable = 0; 504 BUG_ON(!new_device);
496 device->in_fs_metadata = 0; 505 memcpy(new_device, device, sizeof(*new_device));
506 new_device->name = kstrdup(device->name, GFP_NOFS);
507 BUG_ON(!new_device->name);
508 new_device->bdev = NULL;
509 new_device->writeable = 0;
510 new_device->in_fs_metadata = 0;
511 list_replace_rcu(&device->dev_list, &new_device->dev_list);
512
513 call_rcu(&device->rcu, free_device);
497 } 514 }
515 mutex_unlock(&fs_devices->device_list_mutex);
516
498 WARN_ON(fs_devices->open_devices); 517 WARN_ON(fs_devices->open_devices);
499 WARN_ON(fs_devices->rw_devices); 518 WARN_ON(fs_devices->rw_devices);
500 fs_devices->opened = 0; 519 fs_devices->opened = 0;
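__btrfs_close_devices() now clones each device, clears the fields a reader must not trust after close (bdev, writeable, in_fs_metadata) in the copy, swaps the copy in with list_replace_rcu(), and lets the original go through free_device() after a grace period, so concurrent RCU readers only ever see a fully open or fully closed object. A minimal sketch of that clone-and-replace idiom (illustrative names, kernel build assumed):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_dev {
	struct list_head list;
	void *handle;		/* e.g. an open block device */
	bool writeable;
	struct rcu_head rcu;
};

static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_dev, rcu));
}

/* replace @old on its list with a sanitized copy; readers never see a torn object */
static int demo_close_one(struct demo_dev *old)
{
	struct demo_dev *clone = kmemdup(old, sizeof(*old), GFP_NOFS);

	if (!clone)
		return -ENOMEM;
	clone->handle = NULL;
	clone->writeable = false;
	list_replace_rcu(&old->list, &clone->list);
	call_rcu(&old->rcu, demo_free_rcu);	/* old is freed once readers drain */
	return 0;
}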
@@ -597,6 +616,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
597 list_add(&device->dev_alloc_list, 616 list_add(&device->dev_alloc_list,
598 &fs_devices->alloc_list); 617 &fs_devices->alloc_list);
599 } 618 }
619 brelse(bh);
600 continue; 620 continue;
601 621
602error_brelse: 622error_brelse:
@@ -815,10 +835,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
815 /* we don't want to overwrite the superblock on the drive, 835 /* we don't want to overwrite the superblock on the drive,
816 * so we make sure to start at an offset of at least 1MB 836 * so we make sure to start at an offset of at least 1MB
817 */ 837 */
818 search_start = 1024 * 1024; 838 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
819
820 if (root->fs_info->alloc_start + num_bytes <= search_end)
821 search_start = max(root->fs_info->alloc_start, search_start);
822 839
823 max_hole_start = search_start; 840 max_hole_start = search_start;
824 max_hole_size = 0; 841 max_hole_size = 0;
@@ -949,14 +966,14 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
949 if (ret > 0) { 966 if (ret > 0) {
950 ret = btrfs_previous_item(root, path, key.objectid, 967 ret = btrfs_previous_item(root, path, key.objectid,
951 BTRFS_DEV_EXTENT_KEY); 968 BTRFS_DEV_EXTENT_KEY);
952 BUG_ON(ret); 969 if (ret)
970 goto out;
953 leaf = path->nodes[0]; 971 leaf = path->nodes[0];
954 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 972 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
955 extent = btrfs_item_ptr(leaf, path->slots[0], 973 extent = btrfs_item_ptr(leaf, path->slots[0],
956 struct btrfs_dev_extent); 974 struct btrfs_dev_extent);
957 BUG_ON(found_key.offset > start || found_key.offset + 975 BUG_ON(found_key.offset > start || found_key.offset +
958 btrfs_dev_extent_length(leaf, extent) < start); 976 btrfs_dev_extent_length(leaf, extent) < start);
959 ret = 0;
960 } else if (ret == 0) { 977 } else if (ret == 0) {
961 leaf = path->nodes[0]; 978 leaf = path->nodes[0];
962 extent = btrfs_item_ptr(leaf, path->slots[0], 979 extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -967,8 +984,8 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
967 if (device->bytes_used > 0) 984 if (device->bytes_used > 0)
968 device->bytes_used -= btrfs_dev_extent_length(leaf, extent); 985 device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
969 ret = btrfs_del_item(trans, root, path); 986 ret = btrfs_del_item(trans, root, path);
970 BUG_ON(ret);
971 987
988out:
972 btrfs_free_path(path); 989 btrfs_free_path(path);
973 return ret; 990 return ret;
974} 991}
@@ -1203,11 +1220,13 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1203 struct block_device *bdev; 1220 struct block_device *bdev;
1204 struct buffer_head *bh = NULL; 1221 struct buffer_head *bh = NULL;
1205 struct btrfs_super_block *disk_super; 1222 struct btrfs_super_block *disk_super;
1223 struct btrfs_fs_devices *cur_devices;
1206 u64 all_avail; 1224 u64 all_avail;
1207 u64 devid; 1225 u64 devid;
1208 u64 num_devices; 1226 u64 num_devices;
1209 u8 *dev_uuid; 1227 u8 *dev_uuid;
1210 int ret = 0; 1228 int ret = 0;
1229 bool clear_super = false;
1211 1230
1212 mutex_lock(&uuid_mutex); 1231 mutex_lock(&uuid_mutex);
1213 mutex_lock(&root->fs_info->volume_mutex); 1232 mutex_lock(&root->fs_info->volume_mutex);
@@ -1238,14 +1257,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1238 1257
1239 device = NULL; 1258 device = NULL;
1240 devices = &root->fs_info->fs_devices->devices; 1259 devices = &root->fs_info->fs_devices->devices;
1241 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 1260 /*
1261 * It is safe to read the devices since the volume_mutex
1262 * is held.
1263 */
1242 list_for_each_entry(tmp, devices, dev_list) { 1264 list_for_each_entry(tmp, devices, dev_list) {
1243 if (tmp->in_fs_metadata && !tmp->bdev) { 1265 if (tmp->in_fs_metadata && !tmp->bdev) {
1244 device = tmp; 1266 device = tmp;
1245 break; 1267 break;
1246 } 1268 }
1247 } 1269 }
1248 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1249 bdev = NULL; 1270 bdev = NULL;
1250 bh = NULL; 1271 bh = NULL;
1251 disk_super = NULL; 1272 disk_super = NULL;
@@ -1287,8 +1308,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1287 } 1308 }
1288 1309
1289 if (device->writeable) { 1310 if (device->writeable) {
1311 lock_chunks(root);
1290 list_del_init(&device->dev_alloc_list); 1312 list_del_init(&device->dev_alloc_list);
1313 unlock_chunks(root);
1291 root->fs_info->fs_devices->rw_devices--; 1314 root->fs_info->fs_devices->rw_devices--;
1315 clear_super = true;
1292 } 1316 }
1293 1317
1294 ret = btrfs_shrink_device(device, 0); 1318 ret = btrfs_shrink_device(device, 0);
@@ -1300,15 +1324,17 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1300 goto error_undo; 1324 goto error_undo;
1301 1325
1302 device->in_fs_metadata = 0; 1326 device->in_fs_metadata = 0;
1327 btrfs_scrub_cancel_dev(root, device);
1303 1328
1304 /* 1329 /*
1305 * the device list mutex makes sure that we don't change 1330 * the device list mutex makes sure that we don't change
1306 * the device list while someone else is writing out all 1331 * the device list while someone else is writing out all
1307 * the device supers. 1332 * the device supers.
1308 */ 1333 */
1334
1335 cur_devices = device->fs_devices;
1309 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 1336 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1310 list_del_init(&device->dev_list); 1337 list_del_rcu(&device->dev_list);
1311 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1312 1338
1313 device->fs_devices->num_devices--; 1339 device->fs_devices->num_devices--;
1314 1340
@@ -1322,34 +1348,36 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1322 if (device->bdev == root->fs_info->fs_devices->latest_bdev) 1348 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1323 root->fs_info->fs_devices->latest_bdev = next_device->bdev; 1349 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1324 1350
1325 if (device->bdev) { 1351 if (device->bdev)
1326 blkdev_put(device->bdev, device->mode);
1327 device->bdev = NULL;
1328 device->fs_devices->open_devices--; 1352 device->fs_devices->open_devices--;
1329 } 1353
1354 call_rcu(&device->rcu, free_device);
1355 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1330 1356
1331 num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; 1357 num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1332 btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices); 1358 btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
1333 1359
1334 if (device->fs_devices->open_devices == 0) { 1360 if (cur_devices->open_devices == 0) {
1335 struct btrfs_fs_devices *fs_devices; 1361 struct btrfs_fs_devices *fs_devices;
1336 fs_devices = root->fs_info->fs_devices; 1362 fs_devices = root->fs_info->fs_devices;
1337 while (fs_devices) { 1363 while (fs_devices) {
1338 if (fs_devices->seed == device->fs_devices) 1364 if (fs_devices->seed == cur_devices)
1339 break; 1365 break;
1340 fs_devices = fs_devices->seed; 1366 fs_devices = fs_devices->seed;
1341 } 1367 }
1342 fs_devices->seed = device->fs_devices->seed; 1368 fs_devices->seed = cur_devices->seed;
1343 device->fs_devices->seed = NULL; 1369 cur_devices->seed = NULL;
1344 __btrfs_close_devices(device->fs_devices); 1370 lock_chunks(root);
1345 free_fs_devices(device->fs_devices); 1371 __btrfs_close_devices(cur_devices);
1372 unlock_chunks(root);
1373 free_fs_devices(cur_devices);
1346 } 1374 }
1347 1375
1348 /* 1376 /*
1349 * at this point, the device is zero sized. We want to 1377 * at this point, the device is zero sized. We want to
1350 * remove it from the devices list and zero out the old super 1378 * remove it from the devices list and zero out the old super
1351 */ 1379 */
1352 if (device->writeable) { 1380 if (clear_super) {
1353 /* make sure this device isn't detected as part of 1381 /* make sure this device isn't detected as part of
1354 * the FS anymore 1382 * the FS anymore
1355 */ 1383 */
@@ -1358,8 +1386,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1358 sync_dirty_buffer(bh); 1386 sync_dirty_buffer(bh);
1359 } 1387 }
1360 1388
1361 kfree(device->name);
1362 kfree(device);
1363 ret = 0; 1389 ret = 0;
1364 1390
1365error_brelse: 1391error_brelse:
@@ -1373,8 +1399,10 @@ out:
1373 return ret; 1399 return ret;
1374error_undo: 1400error_undo:
1375 if (device->writeable) { 1401 if (device->writeable) {
1402 lock_chunks(root);
1376 list_add(&device->dev_alloc_list, 1403 list_add(&device->dev_alloc_list,
1377 &root->fs_info->fs_devices->alloc_list); 1404 &root->fs_info->fs_devices->alloc_list);
1405 unlock_chunks(root);
1378 root->fs_info->fs_devices->rw_devices++; 1406 root->fs_info->fs_devices->rw_devices++;
1379 } 1407 }
1380 goto error_brelse; 1408 goto error_brelse;
@@ -1414,7 +1442,12 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
1414 INIT_LIST_HEAD(&seed_devices->devices); 1442 INIT_LIST_HEAD(&seed_devices->devices);
1415 INIT_LIST_HEAD(&seed_devices->alloc_list); 1443 INIT_LIST_HEAD(&seed_devices->alloc_list);
1416 mutex_init(&seed_devices->device_list_mutex); 1444 mutex_init(&seed_devices->device_list_mutex);
1417 list_splice_init(&fs_devices->devices, &seed_devices->devices); 1445
1446 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1447 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1448 synchronize_rcu);
1449 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1450
1418 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); 1451 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1419 list_for_each_entry(device, &seed_devices->devices, dev_list) { 1452 list_for_each_entry(device, &seed_devices->devices, dev_list) {
1420 device->fs_devices = seed_devices; 1453 device->fs_devices = seed_devices;
@@ -1475,7 +1508,7 @@ next_slot:
1475 goto error; 1508 goto error;
1476 leaf = path->nodes[0]; 1509 leaf = path->nodes[0];
1477 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 1510 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1478 btrfs_release_path(root, path); 1511 btrfs_release_path(path);
1479 continue; 1512 continue;
1480 } 1513 }
1481 1514
@@ -1611,7 +1644,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1611 * half setup 1644 * half setup
1612 */ 1645 */
1613 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 1646 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1614 list_add(&device->dev_list, &root->fs_info->fs_devices->devices); 1647 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1615 list_add(&device->dev_alloc_list, 1648 list_add(&device->dev_alloc_list,
1616 &root->fs_info->fs_devices->alloc_list); 1649 &root->fs_info->fs_devices->alloc_list);
1617 root->fs_info->fs_devices->num_devices++; 1650 root->fs_info->fs_devices->num_devices++;
@@ -1769,10 +1802,9 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1769 BUG_ON(ret); 1802 BUG_ON(ret);
1770 1803
1771 ret = btrfs_del_item(trans, root, path); 1804 ret = btrfs_del_item(trans, root, path);
1772 BUG_ON(ret);
1773 1805
1774 btrfs_free_path(path); 1806 btrfs_free_path(path);
1775 return 0; 1807 return ret;
1776} 1808}
1777 1809
1778static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 1810static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
@@ -1947,7 +1979,7 @@ again:
1947 chunk = btrfs_item_ptr(leaf, path->slots[0], 1979 chunk = btrfs_item_ptr(leaf, path->slots[0],
1948 struct btrfs_chunk); 1980 struct btrfs_chunk);
1949 chunk_type = btrfs_chunk_type(leaf, chunk); 1981 chunk_type = btrfs_chunk_type(leaf, chunk);
1950 btrfs_release_path(chunk_root, path); 1982 btrfs_release_path(path);
1951 1983
1952 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { 1984 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
1953 ret = btrfs_relocate_chunk(chunk_root, chunk_tree, 1985 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
@@ -2065,7 +2097,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
2065 if (found_key.offset == 0) 2097 if (found_key.offset == 0)
2066 break; 2098 break;
2067 2099
2068 btrfs_release_path(chunk_root, path); 2100 btrfs_release_path(path);
2069 ret = btrfs_relocate_chunk(chunk_root, 2101 ret = btrfs_relocate_chunk(chunk_root,
2070 chunk_root->root_key.objectid, 2102 chunk_root->root_key.objectid,
2071 found_key.objectid, 2103 found_key.objectid,
@@ -2137,7 +2169,7 @@ again:
2137 goto done; 2169 goto done;
2138 if (ret) { 2170 if (ret) {
2139 ret = 0; 2171 ret = 0;
2140 btrfs_release_path(root, path); 2172 btrfs_release_path(path);
2141 break; 2173 break;
2142 } 2174 }
2143 2175
@@ -2146,7 +2178,7 @@ again:
2146 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 2178 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2147 2179
2148 if (key.objectid != device->devid) { 2180 if (key.objectid != device->devid) {
2149 btrfs_release_path(root, path); 2181 btrfs_release_path(path);
2150 break; 2182 break;
2151 } 2183 }
2152 2184
@@ -2154,14 +2186,14 @@ again:
2154 length = btrfs_dev_extent_length(l, dev_extent); 2186 length = btrfs_dev_extent_length(l, dev_extent);
2155 2187
2156 if (key.offset + length <= new_size) { 2188 if (key.offset + length <= new_size) {
2157 btrfs_release_path(root, path); 2189 btrfs_release_path(path);
2158 break; 2190 break;
2159 } 2191 }
2160 2192
2161 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); 2193 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2162 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); 2194 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2163 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); 2195 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2164 btrfs_release_path(root, path); 2196 btrfs_release_path(path);
2165 2197
2166 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid, 2198 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
2167 chunk_offset); 2199 chunk_offset);
@@ -2237,275 +2269,204 @@ static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
2237 return 0; 2269 return 0;
2238} 2270}
2239 2271
2240static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, 2272/*
2241 int num_stripes, int sub_stripes) 2273 * sort the devices in descending order by max_avail, total_avail
2274 */
2275static int btrfs_cmp_device_info(const void *a, const void *b)
2242{ 2276{
2243 if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) 2277 const struct btrfs_device_info *di_a = a;
2244 return calc_size; 2278 const struct btrfs_device_info *di_b = b;
2245 else if (type & BTRFS_BLOCK_GROUP_RAID10)
2246 return calc_size * (num_stripes / sub_stripes);
2247 else
2248 return calc_size * num_stripes;
2249}
2250 2279
2251/* Used to sort the devices by max_avail(descending sort) */ 2280 if (di_a->max_avail > di_b->max_avail)
2252int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2)
2253{
2254 if (((struct btrfs_device_info *)dev_info1)->max_avail >
2255 ((struct btrfs_device_info *)dev_info2)->max_avail)
2256 return -1; 2281 return -1;
2257 else if (((struct btrfs_device_info *)dev_info1)->max_avail < 2282 if (di_a->max_avail < di_b->max_avail)
2258 ((struct btrfs_device_info *)dev_info2)->max_avail)
2259 return 1; 2283 return 1;
2260 else 2284 if (di_a->total_avail > di_b->total_avail)
2261 return 0; 2285 return -1;
2286 if (di_a->total_avail < di_b->total_avail)
2287 return 1;
2288 return 0;
2262} 2289}
2263 2290
2264static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, 2291static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2265 int *num_stripes, int *min_stripes, 2292 struct btrfs_root *extent_root,
2266 int *sub_stripes) 2293 struct map_lookup **map_ret,
2294 u64 *num_bytes_out, u64 *stripe_size_out,
2295 u64 start, u64 type)
2267{ 2296{
2268 *num_stripes = 1; 2297 struct btrfs_fs_info *info = extent_root->fs_info;
2269 *min_stripes = 1; 2298 struct btrfs_fs_devices *fs_devices = info->fs_devices;
2270 *sub_stripes = 0; 2299 struct list_head *cur;
2300 struct map_lookup *map = NULL;
2301 struct extent_map_tree *em_tree;
2302 struct extent_map *em;
2303 struct btrfs_device_info *devices_info = NULL;
2304 u64 total_avail;
2305 int num_stripes; /* total number of stripes to allocate */
2306 int sub_stripes; /* sub_stripes info for map */
2307 int dev_stripes; /* stripes per dev */
2308 int devs_max; /* max devs to use */
2309 int devs_min; /* min devs needed */
2310 int devs_increment; /* ndevs has to be a multiple of this */
2311 int ncopies; /* how many copies the data has */
2312 int ret;
2313 u64 max_stripe_size;
2314 u64 max_chunk_size;
2315 u64 stripe_size;
2316 u64 num_bytes;
2317 int ndevs;
2318 int i;
2319 int j;
2271 2320
2272 if (type & (BTRFS_BLOCK_GROUP_RAID0)) { 2321 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
2273 *num_stripes = fs_devices->rw_devices; 2322 (type & BTRFS_BLOCK_GROUP_DUP)) {
2274 *min_stripes = 2; 2323 WARN_ON(1);
2275 } 2324 type &= ~BTRFS_BLOCK_GROUP_DUP;
2276 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
2277 *num_stripes = 2;
2278 *min_stripes = 2;
2279 }
2280 if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
2281 if (fs_devices->rw_devices < 2)
2282 return -ENOSPC;
2283 *num_stripes = 2;
2284 *min_stripes = 2;
2285 }
2286 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2287 *num_stripes = fs_devices->rw_devices;
2288 if (*num_stripes < 4)
2289 return -ENOSPC;
2290 *num_stripes &= ~(u32)1;
2291 *sub_stripes = 2;
2292 *min_stripes = 4;
2293 } 2325 }
2294 2326
2295 return 0; 2327 if (list_empty(&fs_devices->alloc_list))
2296} 2328 return -ENOSPC;
2297 2329
2298static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices, 2330 sub_stripes = 1;
2299 u64 proposed_size, u64 type, 2331 dev_stripes = 1;
2300 int num_stripes, int small_stripe) 2332 devs_increment = 1;
2301{ 2333 ncopies = 1;
2302 int min_stripe_size = 1 * 1024 * 1024; 2334 devs_max = 0; /* 0 == as many as possible */
2303 u64 calc_size = proposed_size; 2335 devs_min = 1;
2304 u64 max_chunk_size = calc_size;
2305 int ncopies = 1;
2306 2336
2307 if (type & (BTRFS_BLOCK_GROUP_RAID1 | 2337 /*
2308 BTRFS_BLOCK_GROUP_DUP | 2338 * define the properties of each RAID type.
2309 BTRFS_BLOCK_GROUP_RAID10)) 2339 * FIXME: move this to a global table and use it in all RAID
2340 * calculation code
2341 */
2342 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
2343 dev_stripes = 2;
2344 ncopies = 2;
2345 devs_max = 1;
2346 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
2347 devs_min = 2;
2348 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
2349 devs_increment = 2;
2310 ncopies = 2; 2350 ncopies = 2;
2351 devs_max = 2;
2352 devs_min = 2;
2353 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2354 sub_stripes = 2;
2355 devs_increment = 2;
2356 ncopies = 2;
2357 devs_min = 4;
2358 } else {
2359 devs_max = 1;
2360 }
2311 2361
2312 if (type & BTRFS_BLOCK_GROUP_DATA) { 2362 if (type & BTRFS_BLOCK_GROUP_DATA) {
2313 max_chunk_size = 10 * calc_size; 2363 max_stripe_size = 1024 * 1024 * 1024;
2314 min_stripe_size = 64 * 1024 * 1024; 2364 max_chunk_size = 10 * max_stripe_size;
2315 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 2365 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
2316 max_chunk_size = 256 * 1024 * 1024; 2366 max_stripe_size = 256 * 1024 * 1024;
2317 min_stripe_size = 32 * 1024 * 1024; 2367 max_chunk_size = max_stripe_size;
2318 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 2368 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
2319 calc_size = 8 * 1024 * 1024; 2369 max_stripe_size = 8 * 1024 * 1024;
2320 max_chunk_size = calc_size * 2; 2370 max_chunk_size = 2 * max_stripe_size;
2321 min_stripe_size = 1 * 1024 * 1024; 2371 } else {
2372 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
2373 type);
2374 BUG_ON(1);
2322 } 2375 }
2323 2376
2324 /* we don't want a chunk larger than 10% of writeable space */ 2377 /* we don't want a chunk larger than 10% of writeable space */
2325 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 2378 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
2326 max_chunk_size); 2379 max_chunk_size);
2327 2380
2328 if (calc_size * num_stripes > max_chunk_size * ncopies) { 2381 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
2329 calc_size = max_chunk_size * ncopies; 2382 GFP_NOFS);
2330 do_div(calc_size, num_stripes); 2383 if (!devices_info)
2331 do_div(calc_size, BTRFS_STRIPE_LEN); 2384 return -ENOMEM;
2332 calc_size *= BTRFS_STRIPE_LEN;
2333 }
2334 2385
2335 /* we don't want tiny stripes */ 2386 cur = fs_devices->alloc_list.next;
2336 if (!small_stripe)
2337 calc_size = max_t(u64, min_stripe_size, calc_size);
2338 2387
2339 /* 2388 /*
2340 * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure 2389 * in the first pass through the devices list, we gather information
2341 * we end up with something bigger than a stripe 2390 * about the available holes on each device.
2342 */ 2391 */
2343 calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN); 2392 ndevs = 0;
2344 2393 while (cur != &fs_devices->alloc_list) {
2345 do_div(calc_size, BTRFS_STRIPE_LEN); 2394 struct btrfs_device *device;
2346 calc_size *= BTRFS_STRIPE_LEN; 2395 u64 max_avail;
2347 2396 u64 dev_offset;
2348 return calc_size;
2349}
2350
2351static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map,
2352 int num_stripes)
2353{
2354 struct map_lookup *new;
2355 size_t len = map_lookup_size(num_stripes);
2356
2357 BUG_ON(map->num_stripes < num_stripes);
2358
2359 if (map->num_stripes == num_stripes)
2360 return map;
2361
2362 new = kmalloc(len, GFP_NOFS);
2363 if (!new) {
2364 /* just change map->num_stripes */
2365 map->num_stripes = num_stripes;
2366 return map;
2367 }
2368
2369 memcpy(new, map, len);
2370 new->num_stripes = num_stripes;
2371 kfree(map);
2372 return new;
2373}
2374 2397
2375/* 2398 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2376 * helper to allocate device space from btrfs_device_info, in which we stored
2377 * max free space information of every device. It is used when we can not
2378 * allocate chunks by default size.
2379 *
2380 * By this helper, we can allocate a new chunk as larger as possible.
2381 */
2382static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans,
2383 struct btrfs_fs_devices *fs_devices,
2384 struct btrfs_device_info *devices,
2385 int nr_device, u64 type,
2386 struct map_lookup **map_lookup,
2387 int min_stripes, u64 *stripe_size)
2388{
2389 int i, index, sort_again = 0;
2390 int min_devices = min_stripes;
2391 u64 max_avail, min_free;
2392 struct map_lookup *map = *map_lookup;
2393 int ret;
2394 2399
2395 if (nr_device < min_stripes) 2400 cur = cur->next;
2396 return -ENOSPC;
2397 2401
2398 btrfs_descending_sort_devices(devices, nr_device); 2402 if (!device->writeable) {
2403 printk(KERN_ERR
2404 "btrfs: read-only device in alloc_list\n");
2405 WARN_ON(1);
2406 continue;
2407 }
2399 2408
2400 max_avail = devices[0].max_avail; 2409 if (!device->in_fs_metadata)
2401 if (!max_avail) 2410 continue;
2402 return -ENOSPC;
2403 2411
2404 for (i = 0; i < nr_device; i++) { 2412 if (device->total_bytes > device->bytes_used)
2405 /* 2413 total_avail = device->total_bytes - device->bytes_used;
2406 * if dev_offset = 0, it means the free space of this device 2414 else
2407 * is less than what we need, and we didn't search max avail 2415 total_avail = 0;
2408 * extent on this device, so do it now. 2416 /* avail is off by max(alloc_start, 1MB), but that is the same
2417 * for all devices, so it doesn't hurt the sorting later on
2409 */ 2418 */
2410 if (!devices[i].dev_offset) {
2411 ret = find_free_dev_extent(trans, devices[i].dev,
2412 max_avail,
2413 &devices[i].dev_offset,
2414 &devices[i].max_avail);
2415 if (ret != 0 && ret != -ENOSPC)
2416 return ret;
2417 sort_again = 1;
2418 }
2419 }
2420
2421 /* we update the max avail free extent of each devices, sort again */
2422 if (sort_again)
2423 btrfs_descending_sort_devices(devices, nr_device);
2424 2419
2425 if (type & BTRFS_BLOCK_GROUP_DUP) 2420 ret = find_free_dev_extent(trans, device,
2426 min_devices = 1; 2421 max_stripe_size * dev_stripes,
2422 &dev_offset, &max_avail);
2423 if (ret && ret != -ENOSPC)
2424 goto error;
2427 2425
2428 if (!devices[min_devices - 1].max_avail) 2426 if (ret == 0)
2429 return -ENOSPC; 2427 max_avail = max_stripe_size * dev_stripes;
2430 2428
2431 max_avail = devices[min_devices - 1].max_avail; 2429 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
2432 if (type & BTRFS_BLOCK_GROUP_DUP) 2430 continue;
2433 do_div(max_avail, 2);
2434 2431
2435 max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type, 2432 devices_info[ndevs].dev_offset = dev_offset;
2436 min_stripes, 1); 2433 devices_info[ndevs].max_avail = max_avail;
2437 if (type & BTRFS_BLOCK_GROUP_DUP) 2434 devices_info[ndevs].total_avail = total_avail;
2438 min_free = max_avail * 2; 2435 devices_info[ndevs].dev = device;
2439 else 2436 ++ndevs;
2440 min_free = max_avail; 2437 }
2441 2438
2442 if (min_free > devices[min_devices - 1].max_avail) 2439 /*
2443 return -ENOSPC; 2440 * now sort the devices by hole size / available space
2441 */
2442 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
2443 btrfs_cmp_device_info, NULL);
2444 2444
2445 map = __shrink_map_lookup_stripes(map, min_stripes); 2445 /* round down to number of usable stripes */
2446 *stripe_size = max_avail; 2446 ndevs -= ndevs % devs_increment;
2447 2447
2448 index = 0; 2448 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
2449 for (i = 0; i < min_stripes; i++) { 2449 ret = -ENOSPC;
2450 map->stripes[i].dev = devices[index].dev; 2450 goto error;
2451 map->stripes[i].physical = devices[index].dev_offset;
2452 if (type & BTRFS_BLOCK_GROUP_DUP) {
2453 i++;
2454 map->stripes[i].dev = devices[index].dev;
2455 map->stripes[i].physical = devices[index].dev_offset +
2456 max_avail;
2457 }
2458 index++;
2459 } 2451 }
2460 *map_lookup = map;
2461 2452
2462 return 0; 2453 if (devs_max && ndevs > devs_max)
2463} 2454 ndevs = devs_max;
2464 2455 /*
2465static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 2456 * the primary goal is to maximize the number of stripes, so use as many
2466 struct btrfs_root *extent_root, 2457 * devices as possible, even if the stripes are not maximum sized.
2467 struct map_lookup **map_ret, 2458 */
2468 u64 *num_bytes, u64 *stripe_size, 2459 stripe_size = devices_info[ndevs-1].max_avail;
2469 u64 start, u64 type) 2460 num_stripes = ndevs * dev_stripes;
2470{
2471 struct btrfs_fs_info *info = extent_root->fs_info;
2472 struct btrfs_device *device = NULL;
2473 struct btrfs_fs_devices *fs_devices = info->fs_devices;
2474 struct list_head *cur;
2475 struct map_lookup *map;
2476 struct extent_map_tree *em_tree;
2477 struct extent_map *em;
2478 struct btrfs_device_info *devices_info;
2479 struct list_head private_devs;
2480 u64 calc_size = 1024 * 1024 * 1024;
2481 u64 min_free;
2482 u64 avail;
2483 u64 dev_offset;
2484 int num_stripes;
2485 int min_stripes;
2486 int sub_stripes;
2487 int min_devices; /* the min number of devices we need */
2488 int i;
2489 int ret;
2490 int index;
2491 2461
2492 if ((type & BTRFS_BLOCK_GROUP_RAID1) && 2462 if (stripe_size * num_stripes > max_chunk_size * ncopies) {
2493 (type & BTRFS_BLOCK_GROUP_DUP)) { 2463 stripe_size = max_chunk_size * ncopies;
2494 WARN_ON(1); 2464 do_div(stripe_size, num_stripes);
2495 type &= ~BTRFS_BLOCK_GROUP_DUP;
2496 } 2465 }
2497 if (list_empty(&fs_devices->alloc_list))
2498 return -ENOSPC;
2499
2500 ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes,
2501 &min_stripes, &sub_stripes);
2502 if (ret)
2503 return ret;
2504 2466
2505 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, 2467 do_div(stripe_size, dev_stripes);
2506 GFP_NOFS); 2468 do_div(stripe_size, BTRFS_STRIPE_LEN);
2507 if (!devices_info) 2469 stripe_size *= BTRFS_STRIPE_LEN;
2508 return -ENOMEM;
2509 2470
2510 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); 2471 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2511 if (!map) { 2472 if (!map) {
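btrfs_cmp_device_info() above orders the candidate devices by max_avail and then total_avail, both descending, so the first pass can simply take the largest holes first. The same two-key descending comparator as a runnable userspace sketch around qsort(); the struct loosely mirrors btrfs_device_info and the sample values are made up:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct dev_info {
	uint64_t max_avail;	/* largest free hole on the device */
	uint64_t total_avail;	/* total unallocated bytes */
};

/* descending by max_avail, ties broken by total_avail (also descending) */
static int cmp_dev_info(const void *a, const void *b)
{
	const struct dev_info *da = a, *db = b;

	if (da->max_avail > db->max_avail)
		return -1;
	if (da->max_avail < db->max_avail)
		return 1;
	if (da->total_avail > db->total_avail)
		return -1;
	if (da->total_avail < db->total_avail)
		return 1;
	return 0;
}

int main(void)
{
	struct dev_info devs[] = {
		{ 1 << 20, 5 << 20 }, { 8 << 20, 9 << 20 }, { 8 << 20, 2 << 20 },
	};
	size_t n = sizeof(devs) / sizeof(devs[0]);

	qsort(devs, n, sizeof(devs[0]), cmp_dev_info);
	for (size_t i = 0; i < n; i++)
		printf("%zu: max=%llu total=%llu\n", i,
		       (unsigned long long)devs[i].max_avail,
		       (unsigned long long)devs[i].total_avail);
	return 0;
}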
@@ -2514,85 +2475,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2514 } 2475 }
2515 map->num_stripes = num_stripes; 2476 map->num_stripes = num_stripes;
2516 2477
2517 cur = fs_devices->alloc_list.next; 2478 for (i = 0; i < ndevs; ++i) {
2518 index = 0; 2479 for (j = 0; j < dev_stripes; ++j) {
2519 i = 0; 2480 int s = i * dev_stripes + j;
2520 2481 map->stripes[s].dev = devices_info[i].dev;
2521 calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type, 2482 map->stripes[s].physical = devices_info[i].dev_offset +
2522 num_stripes, 0); 2483 j * stripe_size;
2523
2524 if (type & BTRFS_BLOCK_GROUP_DUP) {
2525 min_free = calc_size * 2;
2526 min_devices = 1;
2527 } else {
2528 min_free = calc_size;
2529 min_devices = min_stripes;
2530 }
2531
2532 INIT_LIST_HEAD(&private_devs);
2533 while (index < num_stripes) {
2534 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2535 BUG_ON(!device->writeable);
2536 if (device->total_bytes > device->bytes_used)
2537 avail = device->total_bytes - device->bytes_used;
2538 else
2539 avail = 0;
2540 cur = cur->next;
2541
2542 if (device->in_fs_metadata && avail >= min_free) {
2543 ret = find_free_dev_extent(trans, device, min_free,
2544 &devices_info[i].dev_offset,
2545 &devices_info[i].max_avail);
2546 if (ret == 0) {
2547 list_move_tail(&device->dev_alloc_list,
2548 &private_devs);
2549 map->stripes[index].dev = device;
2550 map->stripes[index].physical =
2551 devices_info[i].dev_offset;
2552 index++;
2553 if (type & BTRFS_BLOCK_GROUP_DUP) {
2554 map->stripes[index].dev = device;
2555 map->stripes[index].physical =
2556 devices_info[i].dev_offset +
2557 calc_size;
2558 index++;
2559 }
2560 } else if (ret != -ENOSPC)
2561 goto error;
2562
2563 devices_info[i].dev = device;
2564 i++;
2565 } else if (device->in_fs_metadata &&
2566 avail >= BTRFS_STRIPE_LEN) {
2567 devices_info[i].dev = device;
2568 devices_info[i].max_avail = avail;
2569 i++;
2570 }
2571
2572 if (cur == &fs_devices->alloc_list)
2573 break;
2574 }
2575
2576 list_splice(&private_devs, &fs_devices->alloc_list);
2577 if (index < num_stripes) {
2578 if (index >= min_stripes) {
2579 num_stripes = index;
2580 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2581 num_stripes /= sub_stripes;
2582 num_stripes *= sub_stripes;
2583 }
2584
2585 map = __shrink_map_lookup_stripes(map, num_stripes);
2586 } else if (i >= min_devices) {
2587 ret = __btrfs_alloc_tiny_space(trans, fs_devices,
2588 devices_info, i, type,
2589 &map, min_stripes,
2590 &calc_size);
2591 if (ret)
2592 goto error;
2593 } else {
2594 ret = -ENOSPC;
2595 goto error;
2596 } 2484 }
2597 } 2485 }
2598 map->sector_size = extent_root->sectorsize; 2486 map->sector_size = extent_root->sectorsize;
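The replacement loop above lays out num_stripes = ndevs * dev_stripes stripes: stripe s = i * dev_stripes + j goes to device i at devices_info[i].dev_offset + j * stripe_size (dev_stripes is greater than 1 only for DUP). A runnable sketch of that index arithmetic with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* made-up layout: 3 usable devices, 2 stripes per device */
	const int ndevs = 3, dev_stripes = 2;
	const uint64_t stripe_size = 256ULL << 20;		/* 256 MiB */
	const uint64_t dev_offset[] = { 1ULL << 20, 1ULL << 20, 1ULL << 20 };

	for (int i = 0; i < ndevs; i++) {
		for (int j = 0; j < dev_stripes; j++) {
			int s = i * dev_stripes + j;
			uint64_t physical = dev_offset[i] + j * stripe_size;

			printf("stripe %d -> device %d @ %llu\n",
			       s, i, (unsigned long long)physical);
		}
	}
	return 0;
}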
@@ -2603,20 +2491,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2603 map->sub_stripes = sub_stripes; 2491 map->sub_stripes = sub_stripes;
2604 2492
2605 *map_ret = map; 2493 *map_ret = map;
2606 *stripe_size = calc_size; 2494 num_bytes = stripe_size * (num_stripes / ncopies);
2607 *num_bytes = chunk_bytes_by_type(type, calc_size,
2608 map->num_stripes, sub_stripes);
2609 2495
2610 trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes); 2496 *stripe_size_out = stripe_size;
2497 *num_bytes_out = num_bytes;
2611 2498
2612 em = alloc_extent_map(GFP_NOFS); 2499 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
2500
2501 em = alloc_extent_map();
2613 if (!em) { 2502 if (!em) {
2614 ret = -ENOMEM; 2503 ret = -ENOMEM;
2615 goto error; 2504 goto error;
2616 } 2505 }
2617 em->bdev = (struct block_device *)map; 2506 em->bdev = (struct block_device *)map;
2618 em->start = start; 2507 em->start = start;
2619 em->len = *num_bytes; 2508 em->len = num_bytes;
2620 em->block_start = 0; 2509 em->block_start = 0;
2621 em->block_len = em->len; 2510 em->block_len = em->len;
2622 2511
@@ -2629,20 +2518,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2629 2518
2630 ret = btrfs_make_block_group(trans, extent_root, 0, type, 2519 ret = btrfs_make_block_group(trans, extent_root, 0, type,
2631 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 2520 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2632 start, *num_bytes); 2521 start, num_bytes);
2633 BUG_ON(ret); 2522 BUG_ON(ret);
2634 2523
2635 index = 0; 2524 for (i = 0; i < map->num_stripes; ++i) {
2636 while (index < map->num_stripes) { 2525 struct btrfs_device *device;
2637 device = map->stripes[index].dev; 2526 u64 dev_offset;
2638 dev_offset = map->stripes[index].physical; 2527
2528 device = map->stripes[i].dev;
2529 dev_offset = map->stripes[i].physical;
2639 2530
2640 ret = btrfs_alloc_dev_extent(trans, device, 2531 ret = btrfs_alloc_dev_extent(trans, device,
2641 info->chunk_root->root_key.objectid, 2532 info->chunk_root->root_key.objectid,
2642 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 2533 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2643 start, dev_offset, calc_size); 2534 start, dev_offset, stripe_size);
2644 BUG_ON(ret); 2535 BUG_ON(ret);
2645 index++;
2646 } 2536 }
2647 2537
2648 kfree(devices_info); 2538 kfree(devices_info);
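With the per-profile parameters introduced in this rewrite (dev_stripes, devs_increment, ncopies, sub_stripes), the usable chunk size comes out as num_bytes = stripe_size * (num_stripes / ncopies). A small runnable check of that formula; the parameter table follows the hunk above, while the device counts and stripe size are example values:

#include <stdio.h>
#include <stdint.h>

struct raid_attr {
	const char *name;
	int dev_stripes;	/* stripes placed on each device */
	int ncopies;		/* copies of the data */
	int ndevs;		/* example device count */
};

int main(void)
{
	const uint64_t stripe_size = 256ULL << 20;	/* example: 256 MiB */
	const struct raid_attr profiles[] = {
		{ "single", 1, 1, 1 },
		{ "dup",    2, 2, 1 },
		{ "raid0",  1, 1, 4 },
		{ "raid1",  1, 2, 2 },
		{ "raid10", 1, 2, 4 },
	};

	for (size_t i = 0; i < sizeof(profiles) / sizeof(profiles[0]); i++) {
		const struct raid_attr *p = &profiles[i];
		int num_stripes = p->ndevs * p->dev_stripes;
		uint64_t num_bytes = stripe_size * (num_stripes / p->ncopies);

		printf("%-7s stripes=%d usable=%llu MiB\n", p->name,
		       num_stripes, (unsigned long long)(num_bytes >> 20));
	}
	return 0;
}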
@@ -2849,7 +2739,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2849 2739
2850void btrfs_mapping_init(struct btrfs_mapping_tree *tree) 2740void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2851{ 2741{
2852 extent_map_tree_init(&tree->map_tree, GFP_NOFS); 2742 extent_map_tree_init(&tree->map_tree);
2853} 2743}
2854 2744
2855void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) 2745void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
@@ -3499,7 +3389,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3499 free_extent_map(em); 3389 free_extent_map(em);
3500 } 3390 }
3501 3391
3502 em = alloc_extent_map(GFP_NOFS); 3392 em = alloc_extent_map();
3503 if (!em) 3393 if (!em)
3504 return -ENOMEM; 3394 return -ENOMEM;
3505 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 3395 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
@@ -3688,15 +3578,6 @@ static int read_one_dev(struct btrfs_root *root,
3688 return ret; 3578 return ret;
3689} 3579}
3690 3580
3691int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
3692{
3693 struct btrfs_dev_item *dev_item;
3694
3695 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
3696 dev_item);
3697 return read_one_dev(root, buf, dev_item);
3698}
3699
3700int btrfs_read_sys_array(struct btrfs_root *root) 3581int btrfs_read_sys_array(struct btrfs_root *root)
3701{ 3582{
3702 struct btrfs_super_block *super_copy = &root->fs_info->super_copy; 3583 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
@@ -3813,7 +3694,7 @@ again:
3813 } 3694 }
3814 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { 3695 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3815 key.objectid = 0; 3696 key.objectid = 0;
3816 btrfs_release_path(root, path); 3697 btrfs_release_path(path);
3817 goto again; 3698 goto again;
3818 } 3699 }
3819 ret = 0; 3700 ret = 0;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index cc2eadaf7a27..7c12d61ae7ae 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -85,7 +85,12 @@ struct btrfs_device {
85 /* physical drive uuid (or lvm uuid) */ 85 /* physical drive uuid (or lvm uuid) */
86 u8 uuid[BTRFS_UUID_SIZE]; 86 u8 uuid[BTRFS_UUID_SIZE];
87 87
88 /* per-device scrub information */
89 struct scrub_dev *scrub_device;
90
88 struct btrfs_work work; 91 struct btrfs_work work;
92 struct rcu_head rcu;
93 struct work_struct rcu_work;
89}; 94};
90 95
91struct btrfs_fs_devices { 96struct btrfs_fs_devices {
@@ -144,6 +149,7 @@ struct btrfs_device_info {
144 struct btrfs_device *dev; 149 struct btrfs_device *dev;
145 u64 dev_offset; 150 u64 dev_offset;
146 u64 max_avail; 151 u64 max_avail;
152 u64 total_avail;
147}; 153};
148 154
149struct map_lookup { 155struct map_lookup {
@@ -157,20 +163,8 @@ struct map_lookup {
157 struct btrfs_bio_stripe stripes[]; 163 struct btrfs_bio_stripe stripes[];
158}; 164};
159 165
160/* Used to sort the devices by max_avail(descending sort) */ 166#define map_lookup_size(n) (sizeof(struct map_lookup) + \
161int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); 167 (sizeof(struct btrfs_bio_stripe) * (n)))
162
163/*
164 * sort the devices by max_avail, in which max free extent size of each device
165 * is stored.(Descending Sort)
166 */
167static inline void btrfs_descending_sort_devices(
168 struct btrfs_device_info *devices,
169 size_t nr_devices)
170{
171 sort(devices, nr_devices, sizeof(struct btrfs_device_info),
172 btrfs_cmp_device_free_bytes, NULL);
173}
174 168
175int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, 169int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
176 u64 end, u64 *length); 170 u64 end, u64 *length);
@@ -196,7 +190,6 @@ void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
196void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); 190void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
197int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, 191int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
198 int mirror_num, int async_submit); 192 int mirror_num, int async_submit);
199int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf);
200int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, 193int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
201 fmode_t flags, void *holder); 194 fmode_t flags, void *holder);
202int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, 195int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
@@ -209,8 +202,6 @@ int btrfs_add_device(struct btrfs_trans_handle *trans,
209int btrfs_rm_device(struct btrfs_root *root, char *device_path); 202int btrfs_rm_device(struct btrfs_root *root, char *device_path);
210int btrfs_cleanup_fs_uuids(void); 203int btrfs_cleanup_fs_uuids(void);
211int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len); 204int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
212int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
213 u64 logical, struct page *page);
214int btrfs_grow_device(struct btrfs_trans_handle *trans, 205int btrfs_grow_device(struct btrfs_trans_handle *trans,
215 struct btrfs_device *device, u64 new_size); 206 struct btrfs_device *device, u64 new_size);
216struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, 207struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
@@ -218,8 +209,6 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
218int btrfs_shrink_device(struct btrfs_device *device, u64 new_size); 209int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
219int btrfs_init_new_device(struct btrfs_root *root, char *path); 210int btrfs_init_new_device(struct btrfs_root *root, char *path);
220int btrfs_balance(struct btrfs_root *dev_root); 211int btrfs_balance(struct btrfs_root *dev_root);
221void btrfs_unlock_volumes(void);
222void btrfs_lock_volumes(void);
223int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); 212int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
224int find_free_dev_extent(struct btrfs_trans_handle *trans, 213int find_free_dev_extent(struct btrfs_trans_handle *trans,
225 struct btrfs_device *device, u64 num_bytes, 214 struct btrfs_device *device, u64 num_bytes,
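map_lookup_size(n), moved into the header above, is the usual sizeof-plus-flexible-array idiom: struct map_lookup ends in a flexible array member (stripes[]), so an allocation for n stripes is the fixed header plus n array elements. A runnable userspace illustration with stand-in types:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct stripe {				/* stand-in for btrfs_bio_stripe */
	uint64_t physical;
	void *dev;
};

struct map_lookup_demo {		/* stand-in for map_lookup */
	uint64_t type;
	int num_stripes;
	struct stripe stripes[];	/* flexible array member */
};

#define map_lookup_demo_size(n) \
	(sizeof(struct map_lookup_demo) + sizeof(struct stripe) * (n))

int main(void)
{
	int n = 4;
	struct map_lookup_demo *map = malloc(map_lookup_demo_size(n));

	if (!map)
		return 1;
	map->num_stripes = n;
	printf("header %zu + %d stripes -> %zu bytes\n",
	       sizeof(*map), n, map_lookup_demo_size(n));
	free(map);
	return 0;
}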
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index cfd660550ded..f3107e4b4d56 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -44,7 +44,7 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
44 return -ENOMEM; 44 return -ENOMEM;
45 45
46 /* lookup the xattr by name */ 46 /* lookup the xattr by name */
47 di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name, 47 di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name,
48 strlen(name), 0); 48 strlen(name), 0);
49 if (!di) { 49 if (!di) {
50 ret = -ENODATA; 50 ret = -ENODATA;
@@ -103,7 +103,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
103 return -ENOMEM; 103 return -ENOMEM;
104 104
105 /* first lets see if we already have this xattr */ 105 /* first lets see if we already have this xattr */
106 di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name, 106 di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
107 strlen(name), -1); 107 strlen(name), -1);
108 if (IS_ERR(di)) { 108 if (IS_ERR(di)) {
109 ret = PTR_ERR(di); 109 ret = PTR_ERR(di);
@@ -120,13 +120,13 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
120 120
121 ret = btrfs_delete_one_dir_name(trans, root, path, di); 121 ret = btrfs_delete_one_dir_name(trans, root, path, di);
122 BUG_ON(ret); 122 BUG_ON(ret);
123 btrfs_release_path(root, path); 123 btrfs_release_path(path);
124 124
125 /* if we don't have a value then we are removing the xattr */ 125 /* if we don't have a value then we are removing the xattr */
126 if (!value) 126 if (!value)
127 goto out; 127 goto out;
128 } else { 128 } else {
129 btrfs_release_path(root, path); 129 btrfs_release_path(path);
130 130
131 if (flags & XATTR_REPLACE) { 131 if (flags & XATTR_REPLACE) {
132 /* we couldn't find the attr to replace */ 132 /* we couldn't find the attr to replace */
@@ -136,7 +136,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
136 } 136 }
137 137
138 /* ok we have to create a completely new xattr */ 138 /* ok we have to create a completely new xattr */
139 ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino, 139 ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
140 name, name_len, value, size); 140 name, name_len, value, size);
141 BUG_ON(ret); 141 BUG_ON(ret);
142out: 142out:
@@ -190,7 +190,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
190 * NOTE: we set key.offset = 0; because we want to start with the 190 * NOTE: we set key.offset = 0; because we want to start with the
191 * first xattr that we find and walk forward 191 * first xattr that we find and walk forward
192 */ 192 */
193 key.objectid = inode->i_ino; 193 key.objectid = btrfs_ino(inode);
194 btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); 194 btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
195 key.offset = 0; 195 key.offset = 0;
196 196
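btrfs_listxattr() above walks the BTRFS_XATTR_ITEM_KEY items of btrfs_ino(inode) starting at offset 0. The same data is visible from userspace through listxattr(2) and getxattr(2); a minimal sketch (Linux-only, the default path is just an example):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/tmp/example";	/* example path */
	char list[4096];
	ssize_t len = listxattr(path, list, sizeof(list));

	if (len < 0) {
		perror("listxattr");
		return 1;
	}
	/* the buffer holds NUL-separated attribute names */
	for (char *name = list; name < list + len; name += strlen(name) + 1) {
		char value[256];
		ssize_t vlen = getxattr(path, name, value, sizeof(value) - 1);

		if (vlen >= 0) {
			value[vlen] = '\0';
			printf("%s = %s\n", name, value);
		}
	}
	return 0;
}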
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 75c47cd8d086..1cd4c3a1862d 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -153,26 +153,6 @@ config CIFS_ACL
153 Allows to fetch CIFS/NTFS ACL from the server. The DACL blob 153 Allows to fetch CIFS/NTFS ACL from the server. The DACL blob
154 is handed over to the application/caller. 154 is handed over to the application/caller.
155 155
156config CIFS_SMB2
157 bool "SMB2 network file system support (EXPERIMENTAL)"
158 depends on EXPERIMENTAL && INET && BROKEN
159 select NLS
160 select KEYS
161 select FSCACHE
162 select DNS_RESOLVER
163
164 help
165 This enables experimental support for the SMB2 (Server Message Block
166 version 2) protocol. The SMB2 protocol is the successor to the
167 popular CIFS and SMB network file sharing protocols. SMB2 is the
168 native file sharing mechanism for recent versions of Windows
169 operating systems (since Vista). SMB2 enablement will eventually
170 allow users better performance, security and features, than would be
171 possible with cifs. Note that smb2 mount options also are simpler
172 (compared to cifs) due to protocol improvements.
173
174 Unless you are a developer or tester, say N.
175
176config CIFS_NFSD_EXPORT 156config CIFS_NFSD_EXPORT
177 bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)" 157 bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)"
178 depends on CIFS && EXPERIMENTAL 158 depends on CIFS && EXPERIMENTAL
diff --git a/fs/cifs/README b/fs/cifs/README
index 4a3ca0e5ca24..c5c2c5e5f0f2 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -457,6 +457,9 @@ A partial list of the supported mount options follows:
457 otherwise - read from the server. All written data are stored 457 otherwise - read from the server. All written data are stored
458 in the cache, but if the client doesn't have Exclusive Oplock, 458 in the cache, but if the client doesn't have Exclusive Oplock,
459 it writes the data to the server. 459 it writes the data to the server.
460 rwpidforward Forward the pid of the process that opened a file to any read or
461 write operation on that file. This prevents applications like WINE
462 from failing on reads and writes when mandatory brlock-style locking is used.
460 acl Allow setfacl and getfacl to manage posix ACLs if server 463 acl Allow setfacl and getfacl to manage posix ACLs if server
461 supports them. (default) 464 supports them. (default)
462 noacl Do not allow setfacl and getfacl calls on this mount 465 noacl Do not allow setfacl and getfacl calls on this mount
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index 53d57a3fe427..dd8584d35a14 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -146,7 +146,7 @@ static char *extract_sharename(const char *treename)
146static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer, 146static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
147 uint16_t maxbuf) 147 uint16_t maxbuf)
148{ 148{
149 const struct cifsTconInfo *tcon = cookie_netfs_data; 149 const struct cifs_tcon *tcon = cookie_netfs_data;
150 char *sharename; 150 char *sharename;
151 uint16_t len; 151 uint16_t len;
152 152
@@ -173,7 +173,7 @@ cifs_fscache_super_get_aux(const void *cookie_netfs_data, void *buffer,
173 uint16_t maxbuf) 173 uint16_t maxbuf)
174{ 174{
175 struct cifs_fscache_super_auxdata auxdata; 175 struct cifs_fscache_super_auxdata auxdata;
176 const struct cifsTconInfo *tcon = cookie_netfs_data; 176 const struct cifs_tcon *tcon = cookie_netfs_data;
177 177
178 memset(&auxdata, 0, sizeof(auxdata)); 178 memset(&auxdata, 0, sizeof(auxdata));
179 auxdata.resource_id = tcon->resource_id; 179 auxdata.resource_id = tcon->resource_id;
@@ -192,7 +192,7 @@ fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data,
192 uint16_t datalen) 192 uint16_t datalen)
193{ 193{
194 struct cifs_fscache_super_auxdata auxdata; 194 struct cifs_fscache_super_auxdata auxdata;
195 const struct cifsTconInfo *tcon = cookie_netfs_data; 195 const struct cifs_tcon *tcon = cookie_netfs_data;
196 196
197 if (datalen != sizeof(auxdata)) 197 if (datalen != sizeof(auxdata))
198 return FSCACHE_CHECKAUX_OBSOLETE; 198 return FSCACHE_CHECKAUX_OBSOLETE;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 18f4272d9047..2fe3cf13b2e9 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -110,8 +110,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
110 struct list_head *tmp1, *tmp2, *tmp3; 110 struct list_head *tmp1, *tmp2, *tmp3;
111 struct mid_q_entry *mid_entry; 111 struct mid_q_entry *mid_entry;
112 struct TCP_Server_Info *server; 112 struct TCP_Server_Info *server;
113 struct cifsSesInfo *ses; 113 struct cifs_ses *ses;
114 struct cifsTconInfo *tcon; 114 struct cifs_tcon *tcon;
115 int i, j; 115 int i, j;
116 __u32 dev_type; 116 __u32 dev_type;
117 117
@@ -152,7 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
152 tcp_ses_list); 152 tcp_ses_list);
153 i++; 153 i++;
154 list_for_each(tmp2, &server->smb_ses_list) { 154 list_for_each(tmp2, &server->smb_ses_list) {
155 ses = list_entry(tmp2, struct cifsSesInfo, 155 ses = list_entry(tmp2, struct cifs_ses,
156 smb_ses_list); 156 smb_ses_list);
157 if ((ses->serverDomain == NULL) || 157 if ((ses->serverDomain == NULL) ||
158 (ses->serverOS == NULL) || 158 (ses->serverOS == NULL) ||
@@ -171,7 +171,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
171 seq_printf(m, "TCP status: %d\n\tLocal Users To " 171 seq_printf(m, "TCP status: %d\n\tLocal Users To "
172 "Server: %d SecMode: 0x%x Req On Wire: %d", 172 "Server: %d SecMode: 0x%x Req On Wire: %d",
173 server->tcpStatus, server->srv_count, 173 server->tcpStatus, server->srv_count,
174 server->secMode, 174 server->sec_mode,
175 atomic_read(&server->inFlight)); 175 atomic_read(&server->inFlight));
176 176
177#ifdef CONFIG_CIFS_STATS2 177#ifdef CONFIG_CIFS_STATS2
@@ -183,7 +183,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
183 seq_puts(m, "\n\tShares:"); 183 seq_puts(m, "\n\tShares:");
184 j = 0; 184 j = 0;
185 list_for_each(tmp3, &ses->tcon_list) { 185 list_for_each(tmp3, &ses->tcon_list) {
186 tcon = list_entry(tmp3, struct cifsTconInfo, 186 tcon = list_entry(tmp3, struct cifs_tcon,
187 tcon_list); 187 tcon_list);
188 ++j; 188 ++j;
189 dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType); 189 dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
@@ -256,8 +256,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
256 int rc; 256 int rc;
257 struct list_head *tmp1, *tmp2, *tmp3; 257 struct list_head *tmp1, *tmp2, *tmp3;
258 struct TCP_Server_Info *server; 258 struct TCP_Server_Info *server;
259 struct cifsSesInfo *ses; 259 struct cifs_ses *ses;
260 struct cifsTconInfo *tcon; 260 struct cifs_tcon *tcon;
261 261
262 rc = get_user(c, buffer); 262 rc = get_user(c, buffer);
263 if (rc) 263 if (rc)
@@ -273,11 +273,11 @@ static ssize_t cifs_stats_proc_write(struct file *file,
273 server = list_entry(tmp1, struct TCP_Server_Info, 273 server = list_entry(tmp1, struct TCP_Server_Info,
274 tcp_ses_list); 274 tcp_ses_list);
275 list_for_each(tmp2, &server->smb_ses_list) { 275 list_for_each(tmp2, &server->smb_ses_list) {
276 ses = list_entry(tmp2, struct cifsSesInfo, 276 ses = list_entry(tmp2, struct cifs_ses,
277 smb_ses_list); 277 smb_ses_list);
278 list_for_each(tmp3, &ses->tcon_list) { 278 list_for_each(tmp3, &ses->tcon_list) {
279 tcon = list_entry(tmp3, 279 tcon = list_entry(tmp3,
280 struct cifsTconInfo, 280 struct cifs_tcon,
281 tcon_list); 281 tcon_list);
282 atomic_set(&tcon->num_smbs_sent, 0); 282 atomic_set(&tcon->num_smbs_sent, 0);
283 atomic_set(&tcon->num_writes, 0); 283 atomic_set(&tcon->num_writes, 0);
@@ -312,8 +312,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
312 int i; 312 int i;
313 struct list_head *tmp1, *tmp2, *tmp3; 313 struct list_head *tmp1, *tmp2, *tmp3;
314 struct TCP_Server_Info *server; 314 struct TCP_Server_Info *server;
315 struct cifsSesInfo *ses; 315 struct cifs_ses *ses;
316 struct cifsTconInfo *tcon; 316 struct cifs_tcon *tcon;
317 317
318 seq_printf(m, 318 seq_printf(m,
319 "Resources in use\nCIFS Session: %d\n", 319 "Resources in use\nCIFS Session: %d\n",
@@ -346,11 +346,11 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
346 server = list_entry(tmp1, struct TCP_Server_Info, 346 server = list_entry(tmp1, struct TCP_Server_Info,
347 tcp_ses_list); 347 tcp_ses_list);
348 list_for_each(tmp2, &server->smb_ses_list) { 348 list_for_each(tmp2, &server->smb_ses_list) {
349 ses = list_entry(tmp2, struct cifsSesInfo, 349 ses = list_entry(tmp2, struct cifs_ses,
350 smb_ses_list); 350 smb_ses_list);
351 list_for_each(tmp3, &ses->tcon_list) { 351 list_for_each(tmp3, &ses->tcon_list) {
352 tcon = list_entry(tmp3, 352 tcon = list_entry(tmp3,
353 struct cifsTconInfo, 353 struct cifs_tcon,
354 tcon_list); 354 tcon_list);
355 i++; 355 i++;
356 seq_printf(m, "\n%d) %s", i, tcon->treeName); 356 seq_printf(m, "\n%d) %s", i, tcon->treeName);
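
All of the proc handlers above walk the same three-level hierarchy -- TCP connection, then SMB session, then tree connection -- now spelled with the renamed struct cifs_ses and struct cifs_tcon. A condensed sketch of that traversal, assuming the global cifs_tcp_ses_list and cifs_tcp_ses_lock that the debug code iterates under:

    /* Sketch: visit every tcon of every session on every server. */
    static void for_each_tcon(void (*visit)(struct cifs_tcon *))
    {
            struct list_head *tmp1, *tmp2, *tmp3;
            struct TCP_Server_Info *server;
            struct cifs_ses *ses;
            struct cifs_tcon *tcon;

            spin_lock(&cifs_tcp_ses_lock);
            list_for_each(tmp1, &cifs_tcp_ses_list) {
                    server = list_entry(tmp1, struct TCP_Server_Info, tcp_ses_list);
                    list_for_each(tmp2, &server->smb_ses_list) {
                            ses = list_entry(tmp2, struct cifs_ses, smb_ses_list);
                            list_for_each(tmp3, &ses->tcon_list) {
                                    tcon = list_entry(tmp3, struct cifs_tcon, tcon_list);
                                    visit(tcon);    /* per-share work */
                            }
                    }
            }
            spin_unlock(&cifs_tcp_ses_lock);
    }
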
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 2b68ac57d97d..8d8f28c94c0f 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -272,7 +272,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
272 struct dfs_info3_param *referrals = NULL; 272 struct dfs_info3_param *referrals = NULL;
273 unsigned int num_referrals = 0; 273 unsigned int num_referrals = 0;
274 struct cifs_sb_info *cifs_sb; 274 struct cifs_sb_info *cifs_sb;
275 struct cifsSesInfo *ses; 275 struct cifs_ses *ses;
276 char *full_path; 276 char *full_path;
277 int xid, i; 277 int xid, i;
278 int rc; 278 int rc;
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index a9d5692e0c20..ffb1459dc6ec 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -41,6 +41,7 @@
41#define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */ 41#define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */
42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ 42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
43#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */ 43#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
44#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */
44 45
45struct cifs_sb_info { 46struct cifs_sb_info {
46 struct rb_root tlink_tree; 47 struct rb_root tlink_tree;
@@ -56,8 +57,6 @@ struct cifs_sb_info {
56 mode_t mnt_file_mode; 57 mode_t mnt_file_mode;
57 mode_t mnt_dir_mode; 58 mode_t mnt_dir_mode;
58 unsigned int mnt_cifs_flags; 59 unsigned int mnt_cifs_flags;
59 int prepathlen;
60 char *prepath; /* relative path under the share to mount to */
61 char *mountdata; /* options received at mount time or via DFS refs */ 60 char *mountdata; /* options received at mount time or via DFS refs */
62 struct backing_dev_info bdi; 61 struct backing_dev_info bdi;
63 struct delayed_work prune_tlinks; 62 struct delayed_work prune_tlinks;
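
CIFS_MOUNT_RWPIDFORWARD is the in-kernel flag behind the new rwpidforward mount option documented in the README hunk earlier: read/write requests carry the pid of the process that opened the file rather than the pid of the caller. A hypothetical helper (not part of this patch) showing how such a flag check could select the pid to place in a request:

    /* Hypothetical: pick the pid to send with a read/write request. */
    static __u32 io_pid(const struct cifs_sb_info *cifs_sb, __u32 opener_pid)
    {
            if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                    return opener_pid;      /* forward the opener's pid */
            return current->tgid;           /* default: the pid issuing the I/O */
    }
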
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 33d221394aca..2272fd5fe5b7 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -95,7 +95,7 @@ struct key_type cifs_spnego_key_type = {
95 95
96/* get a key struct with a SPNEGO security blob, suitable for session setup */ 96/* get a key struct with a SPNEGO security blob, suitable for session setup */
97struct key * 97struct key *
98cifs_get_spnego_key(struct cifsSesInfo *sesInfo) 98cifs_get_spnego_key(struct cifs_ses *sesInfo)
99{ 99{
100 struct TCP_Server_Info *server = sesInfo->server; 100 struct TCP_Server_Info *server = sesInfo->server;
101 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr; 101 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h
index e4041ec4d712..31bef9ee078b 100644
--- a/fs/cifs/cifs_spnego.h
+++ b/fs/cifs/cifs_spnego.h
@@ -41,7 +41,7 @@ struct cifs_spnego_msg {
41 41
42#ifdef __KERNEL__ 42#ifdef __KERNEL__
43extern struct key_type cifs_spnego_key_type; 43extern struct key_type cifs_spnego_key_type;
44extern struct key *cifs_get_spnego_key(struct cifsSesInfo *sesInfo); 44extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo);
45#endif /* KERNEL */ 45#endif /* KERNEL */
46 46
47#endif /* _CIFS_SPNEGO_H */ 47#endif /* _CIFS_SPNEGO_H */
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index f3c6fb9942ac..8f1700623b41 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -38,7 +38,7 @@ static const struct cifs_sid sid_everyone = {
38 1, 1, {0, 0, 0, 0, 0, 1}, {0} }; 38 1, 1, {0, 0, 0, 0, 0, 1}, {0} };
39/* security id for Authenticated Users system group */ 39/* security id for Authenticated Users system group */
40static const struct cifs_sid sid_authusers = { 40static const struct cifs_sid sid_authusers = {
41 1, 1, {0, 0, 0, 0, 0, 5}, {11} }; 41 1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
42/* group users */ 42/* group users */
43static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} }; 43static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
44 44
@@ -458,7 +458,8 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
458 if (num_subauth) { 458 if (num_subauth) {
459 for (i = 0; i < num_subauth; ++i) { 459 for (i = 0; i < num_subauth; ++i) {
460 if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) { 460 if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
461 if (ctsid->sub_auth[i] > cwsid->sub_auth[i]) 461 if (le32_to_cpu(ctsid->sub_auth[i]) >
462 le32_to_cpu(cwsid->sub_auth[i]))
462 return 1; 463 return 1;
463 else 464 else
464 return -1; 465 return -1;
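
The hunk above addresses an endianness issue: SID subauthorities are stored little-endian on the wire, so an ordering comparison has to be done in CPU byte order rather than on the raw __le32 values. The fixed per-entry comparison reduces to:

    /* Compare one pair of subauthority values in CPU byte order. */
    static int cmp_sub_auth(__le32 a, __le32 b)
    {
            u32 ca = le32_to_cpu(a);
            u32 cb = le32_to_cpu(b);

            if (ca == cb)
                    return 0;
            return ca > cb ? 1 : -1;
    }
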
@@ -945,7 +946,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
945 int oplock = 0; 946 int oplock = 0;
946 int xid, rc; 947 int xid, rc;
947 __u16 fid; 948 __u16 fid;
948 struct cifsTconInfo *tcon; 949 struct cifs_tcon *tcon;
949 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 950 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
950 951
951 if (IS_ERR(tlink)) 952 if (IS_ERR(tlink))
@@ -1013,7 +1014,7 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
1013 int oplock = 0; 1014 int oplock = 0;
1014 int xid, rc; 1015 int xid, rc;
1015 __u16 fid; 1016 __u16 fid;
1016 struct cifsTconInfo *tcon; 1017 struct cifs_tcon *tcon;
1017 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 1018 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1018 1019
1019 if (IS_ERR(tlink)) 1020 if (IS_ERR(tlink))
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 45c3f78c8f81..dfbd9f1f373d 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -229,7 +229,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
229} 229}
230 230
231/* first calculate 24 bytes ntlm response and then 16 byte session key */ 231/* first calculate 24 bytes ntlm response and then 16 byte session key */
232int setup_ntlm_response(struct cifsSesInfo *ses) 232int setup_ntlm_response(struct cifs_ses *ses)
233{ 233{
234 int rc = 0; 234 int rc = 0;
235 unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE; 235 unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE;
@@ -312,7 +312,7 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
312 * Allocate domain name which gets freed when session struct is deallocated. 312 * Allocate domain name which gets freed when session struct is deallocated.
313 */ 313 */
314static int 314static int
315build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp) 315build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
316{ 316{
317 unsigned int dlen; 317 unsigned int dlen;
318 unsigned int wlen; 318 unsigned int wlen;
@@ -400,7 +400,7 @@ build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
400 * about target string i.e. for some, just user name might suffice. 400 * about target string i.e. for some, just user name might suffice.
401 */ 401 */
402static int 402static int
403find_domain_name(struct cifsSesInfo *ses, const struct nls_table *nls_cp) 403find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
404{ 404{
405 unsigned int attrsize; 405 unsigned int attrsize;
406 unsigned int type; 406 unsigned int type;
@@ -445,7 +445,7 @@ find_domain_name(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
445 return 0; 445 return 0;
446} 446}
447 447
448static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash, 448static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
449 const struct nls_table *nls_cp) 449 const struct nls_table *nls_cp)
450{ 450{
451 int rc = 0; 451 int rc = 0;
@@ -527,7 +527,7 @@ calc_exit_2:
527} 527}
528 528
529static int 529static int
530CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash) 530CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
531{ 531{
532 int rc; 532 int rc;
533 unsigned int offset = CIFS_SESS_KEY_SIZE + 8; 533 unsigned int offset = CIFS_SESS_KEY_SIZE + 8;
@@ -563,7 +563,7 @@ CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash)
563 563
564 564
565int 565int
566setup_ntlmv2_rsp(struct cifsSesInfo *ses, const struct nls_table *nls_cp) 566setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
567{ 567{
568 int rc; 568 int rc;
569 int baselen; 569 int baselen;
@@ -649,7 +649,7 @@ setup_ntlmv2_rsp_ret:
649} 649}
650 650
651int 651int
652calc_seckey(struct cifsSesInfo *ses) 652calc_seckey(struct cifs_ses *ses)
653{ 653{
654 int rc; 654 int rc;
655 struct crypto_blkcipher *tfm_arc4; 655 struct crypto_blkcipher *tfm_arc4;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 493b74ca5648..989442dcfb45 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -104,46 +104,25 @@ cifs_sb_deactive(struct super_block *sb)
104} 104}
105 105
106static int 106static int
107cifs_read_super(struct super_block *sb, void *data, 107cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
108 const char *devname, int silent) 108 const char *devname, int silent)
109{ 109{
110 struct inode *inode; 110 struct inode *inode;
111 struct cifs_sb_info *cifs_sb; 111 struct cifs_sb_info *cifs_sb;
112 int rc = 0; 112 int rc = 0;
113 113
114 /* BB should we make this contingent on mount parm? */
115 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
116 sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
117 cifs_sb = CIFS_SB(sb); 114 cifs_sb = CIFS_SB(sb);
118 if (cifs_sb == NULL)
119 return -ENOMEM;
120 115
121 spin_lock_init(&cifs_sb->tlink_tree_lock); 116 spin_lock_init(&cifs_sb->tlink_tree_lock);
122 cifs_sb->tlink_tree = RB_ROOT; 117 cifs_sb->tlink_tree = RB_ROOT;
123 118
124 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); 119 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
125 if (rc) { 120 if (rc)
126 kfree(cifs_sb);
127 return rc; 121 return rc;
128 }
129 cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
130 122
131 /* 123 cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
132 * Copy mount params to sb for use in submounts. Better to do
133 * the copy here and deal with the error before cleanup gets
134 * complicated post-mount.
135 */
136 if (data) {
137 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
138 if (cifs_sb->mountdata == NULL) {
139 bdi_destroy(&cifs_sb->bdi);
140 kfree(sb->s_fs_info);
141 sb->s_fs_info = NULL;
142 return -ENOMEM;
143 }
144 }
145 124
146 rc = cifs_mount(sb, cifs_sb, devname); 125 rc = cifs_mount(sb, cifs_sb, volume_info, devname);
147 126
148 if (rc) { 127 if (rc) {
149 if (!silent) 128 if (!silent)
@@ -194,15 +173,7 @@ out_no_root:
194 cifs_umount(sb, cifs_sb); 173 cifs_umount(sb, cifs_sb);
195 174
196out_mount_failed: 175out_mount_failed:
197 if (cifs_sb) { 176 bdi_destroy(&cifs_sb->bdi);
198 if (cifs_sb->mountdata) {
199 kfree(cifs_sb->mountdata);
200 cifs_sb->mountdata = NULL;
201 }
202 unload_nls(cifs_sb->local_nls);
203 bdi_destroy(&cifs_sb->bdi);
204 kfree(cifs_sb);
205 }
206 return rc; 177 return rc;
207} 178}
208 179
@@ -237,7 +208,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
237{ 208{
238 struct super_block *sb = dentry->d_sb; 209 struct super_block *sb = dentry->d_sb;
239 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 210 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
240 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); 211 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
241 int rc = -EOPNOTSUPP; 212 int rc = -EOPNOTSUPP;
242 int xid; 213 int xid;
243 214
@@ -390,7 +361,7 @@ static int
390cifs_show_options(struct seq_file *s, struct vfsmount *m) 361cifs_show_options(struct seq_file *s, struct vfsmount *m)
391{ 362{
392 struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb); 363 struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb);
393 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); 364 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
394 struct sockaddr *srcaddr; 365 struct sockaddr *srcaddr;
395 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; 366 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
396 367
@@ -444,14 +415,20 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
444 seq_printf(s, ",nocase"); 415 seq_printf(s, ",nocase");
445 if (tcon->retry) 416 if (tcon->retry)
446 seq_printf(s, ",hard"); 417 seq_printf(s, ",hard");
447 if (cifs_sb->prepath) 418 if (tcon->unix_ext)
448 seq_printf(s, ",prepath=%s", cifs_sb->prepath); 419 seq_printf(s, ",unix");
420 else
421 seq_printf(s, ",nounix");
449 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) 422 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
450 seq_printf(s, ",posixpaths"); 423 seq_printf(s, ",posixpaths");
451 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) 424 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
452 seq_printf(s, ",setuids"); 425 seq_printf(s, ",setuids");
453 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) 426 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
454 seq_printf(s, ",serverino"); 427 seq_printf(s, ",serverino");
428 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
429 seq_printf(s, ",rwpidforward");
430 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
431 seq_printf(s, ",forcemand");
455 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) 432 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
456 seq_printf(s, ",directio"); 433 seq_printf(s, ",directio");
457 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 434 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
@@ -484,7 +461,7 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
484static void cifs_umount_begin(struct super_block *sb) 461static void cifs_umount_begin(struct super_block *sb)
485{ 462{
486 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 463 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
487 struct cifsTconInfo *tcon; 464 struct cifs_tcon *tcon;
488 465
489 if (cifs_sb == NULL) 466 if (cifs_sb == NULL)
490 return; 467 return;
@@ -559,29 +536,189 @@ static const struct super_operations cifs_super_ops = {
559#endif 536#endif
560}; 537};
561 538
539/*
540 * Get root dentry from superblock according to prefix path mount option.
541 * Return dentry with refcount + 1 on success and NULL otherwise.
542 */
543static struct dentry *
544cifs_get_root(struct smb_vol *vol, struct super_block *sb)
545{
546 int xid, rc;
547 struct inode *inode;
548 struct qstr name;
549 struct dentry *dparent = NULL, *dchild = NULL, *alias;
550 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
551 unsigned int i, full_len, len;
552 char *full_path = NULL, *pstart;
553 char sep;
554
555 full_path = cifs_build_path_to_root(vol, cifs_sb,
556 cifs_sb_master_tcon(cifs_sb));
557 if (full_path == NULL)
558 return NULL;
559
560 cFYI(1, "Get root dentry for %s", full_path);
561
562 xid = GetXid();
563 sep = CIFS_DIR_SEP(cifs_sb);
564 dparent = dget(sb->s_root);
565 full_len = strlen(full_path);
566 full_path[full_len] = sep;
567 pstart = full_path + 1;
568
569 for (i = 1, len = 0; i <= full_len; i++) {
570 if (full_path[i] != sep || !len) {
571 len++;
572 continue;
573 }
574
575 full_path[i] = 0;
576 cFYI(1, "get dentry for %s", pstart);
577
578 name.name = pstart;
579 name.len = len;
580 name.hash = full_name_hash(pstart, len);
581 dchild = d_lookup(dparent, &name);
582 if (dchild == NULL) {
583 cFYI(1, "not exists");
584 dchild = d_alloc(dparent, &name);
585 if (dchild == NULL) {
586 dput(dparent);
587 dparent = NULL;
588 goto out;
589 }
590 }
591
592 cFYI(1, "get inode");
593 if (dchild->d_inode == NULL) {
594 cFYI(1, "not exists");
595 inode = NULL;
596 if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
597 rc = cifs_get_inode_info_unix(&inode, full_path,
598 sb, xid);
599 else
600 rc = cifs_get_inode_info(&inode, full_path,
601 NULL, sb, xid, NULL);
602 if (rc) {
603 dput(dchild);
604 dput(dparent);
605 dparent = NULL;
606 goto out;
607 }
608 alias = d_materialise_unique(dchild, inode);
609 if (alias != NULL) {
610 dput(dchild);
611 if (IS_ERR(alias)) {
612 dput(dparent);
613 dparent = NULL;
614 goto out;
615 }
616 dchild = alias;
617 }
618 }
619 cFYI(1, "parent %p, child %p", dparent, dchild);
620
621 dput(dparent);
622 dparent = dchild;
623 len = 0;
624 pstart = full_path + i + 1;
625 full_path[i] = sep;
626 }
627out:
628 _FreeXid(xid);
629 kfree(full_path);
630 return dparent;
631}
632
562static struct dentry * 633static struct dentry *
563cifs_do_mount(struct file_system_type *fs_type, 634cifs_do_mount(struct file_system_type *fs_type,
564 int flags, const char *dev_name, void *data) 635 int flags, const char *dev_name, void *data)
565{ 636{
566 int rc; 637 int rc;
567 struct super_block *sb; 638 struct super_block *sb;
568 639 struct cifs_sb_info *cifs_sb;
569 sb = sget(fs_type, NULL, set_anon_super, NULL); 640 struct smb_vol *volume_info;
641 struct cifs_mnt_data mnt_data;
642 struct dentry *root;
570 643
571 cFYI(1, "Devname: %s flags: %d ", dev_name, flags); 644 cFYI(1, "Devname: %s flags: %d ", dev_name, flags);
572 645
573 if (IS_ERR(sb)) 646 rc = cifs_setup_volume_info(&volume_info, (char *)data, dev_name);
574 return ERR_CAST(sb); 647 if (rc)
648 return ERR_PTR(rc);
649
650 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
651 if (cifs_sb == NULL) {
652 root = ERR_PTR(-ENOMEM);
653 goto out;
654 }
655
656 cifs_setup_cifs_sb(volume_info, cifs_sb);
657
658 mnt_data.vol = volume_info;
659 mnt_data.cifs_sb = cifs_sb;
660 mnt_data.flags = flags;
661
662 sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data);
663 if (IS_ERR(sb)) {
664 root = ERR_CAST(sb);
665 goto out_cifs_sb;
666 }
667
668 if (sb->s_fs_info) {
669 cFYI(1, "Use existing superblock");
670 goto out_shared;
671 }
672
673 /*
674 * Copy mount params for use in submounts. Better to do
675 * the copy here and deal with the error before cleanup gets
676 * complicated post-mount.
677 */
678 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
679 if (cifs_sb->mountdata == NULL) {
680 root = ERR_PTR(-ENOMEM);
681 goto out_super;
682 }
575 683
576 sb->s_flags = flags; 684 sb->s_flags = flags;
685 /* BB should we make this contingent on mount parm? */
686 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
687 sb->s_fs_info = cifs_sb;
577 688
578 rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0); 689 rc = cifs_read_super(sb, volume_info, dev_name,
690 flags & MS_SILENT ? 1 : 0);
579 if (rc) { 691 if (rc) {
580 deactivate_locked_super(sb); 692 root = ERR_PTR(rc);
581 return ERR_PTR(rc); 693 goto out_super;
582 } 694 }
695
583 sb->s_flags |= MS_ACTIVE; 696 sb->s_flags |= MS_ACTIVE;
584 return dget(sb->s_root); 697
698 root = cifs_get_root(volume_info, sb);
699 if (root == NULL)
700 goto out_super;
701
702 cFYI(1, "dentry root is: %p", root);
703 goto out;
704
705out_shared:
706 root = cifs_get_root(volume_info, sb);
707 if (root)
708 cFYI(1, "dentry root is: %p", root);
709 goto out;
710
711out_super:
712 kfree(cifs_sb->mountdata);
713 deactivate_locked_super(sb);
714
715out_cifs_sb:
716 unload_nls(cifs_sb->local_nls);
717 kfree(cifs_sb);
718
719out:
720 cifs_cleanup_volume_info(&volume_info);
721 return root;
585} 722}
586 723
587static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, 724static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
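
The rewritten cifs_do_mount() above moves option parsing and cifs_sb allocation in front of superblock creation so that sget() can be given a match callback (cifs_match_super, declared later in cifsproto.h) plus per-mount data, letting repeat mounts of the same share reuse an existing superblock. A condensed sketch of that control flow, under the same names:

    /* Sketch: share an existing superblock when the match callback hits. */
    static struct dentry *mount_shared_or_new(struct file_system_type *fs_type,
                                              int flags, struct smb_vol *vol,
                                              struct cifs_sb_info *cifs_sb)
    {
            struct cifs_mnt_data mnt_data = {
                    .vol = vol, .cifs_sb = cifs_sb, .flags = flags,
            };
            struct super_block *sb;

            sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data);
            if (IS_ERR(sb))
                    return ERR_CAST(sb);
            if (sb->s_fs_info)              /* matched an already mounted share */
                    return cifs_get_root(vol, sb);
            /* new superblock: attach cifs_sb, cifs_read_super(), then cifs_get_root() */
            return NULL;
    }
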
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 76b4517e74b0..6255fa812c7a 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -155,6 +155,81 @@ struct cifs_cred {
155 ***************************************************************** 155 *****************************************************************
156 */ 156 */
157 157
158struct smb_vol {
159 char *username;
160 char *password;
161 char *domainname;
162 char *UNC;
163 char *UNCip;
164 char *iocharset; /* local code page for mapping to and from Unicode */
165 char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
166 char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
167 uid_t cred_uid;
168 uid_t linux_uid;
169 gid_t linux_gid;
170 mode_t file_mode;
171 mode_t dir_mode;
172 unsigned secFlg;
173 bool retry:1;
174 bool intr:1;
175 bool setuids:1;
176 bool override_uid:1;
177 bool override_gid:1;
178 bool dynperm:1;
179 bool noperm:1;
180 bool no_psx_acl:1; /* set if posix acl support should be disabled */
181 bool cifs_acl:1;
182 bool no_xattr:1; /* set if xattr (EA) support should be disabled*/
183 bool server_ino:1; /* use inode numbers from server ie UniqueId */
184 bool direct_io:1;
185 bool strict_io:1; /* strict cache behavior */
186 bool remap:1; /* set to remap seven reserved chars in filenames */
187 bool posix_paths:1; /* unset to not ask for posix pathnames. */
188 bool no_linux_ext:1;
189 bool sfu_emul:1;
190 bool nullauth:1; /* attempt to authenticate with null user */
191 bool nocase:1; /* request case insensitive filenames */
192 bool nobrl:1; /* disable sending byte range locks to srv */
193 bool mand_lock:1; /* send mandatory not posix byte range lock reqs */
194 bool seal:1; /* request transport encryption on share */
195 bool nodfs:1; /* Do not request DFS, even if available */
196 bool local_lease:1; /* check leases only on local system, not remote */
197 bool noblocksnd:1;
198 bool noautotune:1;
199 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
200 bool fsc:1; /* enable fscache */
201 bool mfsymlinks:1; /* use Minshall+French Symlinks */
202 bool multiuser:1;
203 bool rwpidforward:1; /* pid forward for read/write operations */
204 unsigned int rsize;
205 unsigned int wsize;
206 bool sockopt_tcp_nodelay:1;
207 unsigned short int port;
208 unsigned long actimeo; /* attribute cache timeout (jiffies) */
209 char *prepath;
210 struct sockaddr_storage srcaddr; /* allow binding to a local IP */
211 struct nls_table *local_nls;
212};
213
214#define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \
215 CIFS_MOUNT_SERVER_INUM | CIFS_MOUNT_DIRECT_IO | \
216 CIFS_MOUNT_NO_XATTR | CIFS_MOUNT_MAP_SPECIAL_CHR | \
217 CIFS_MOUNT_UNX_EMUL | CIFS_MOUNT_NO_BRL | \
218 CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_OVERR_UID | \
219 CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \
220 CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \
221 CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \
222 CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO)
223
224#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
225 MS_NODEV | MS_SYNCHRONOUS)
226
227struct cifs_mnt_data {
228 struct cifs_sb_info *cifs_sb;
229 struct smb_vol *vol;
230 int flags;
231};
232
158struct TCP_Server_Info { 233struct TCP_Server_Info {
159 struct list_head tcp_ses_list; 234 struct list_head tcp_ses_list;
160 struct list_head smb_ses_list; 235 struct list_head smb_ses_list;
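
struct smb_vol now lives in cifsglob.h together with CIFS_MOUNT_MASK (the option bits that may legitimately differ between otherwise identical mounts), CIFS_MS_MASK (the VFS flags that matter for sharing), and the cifs_mnt_data cookie handed to sget(). A hypothetical compatibility check, purely to illustrate how the two masks are meant to be used (the real logic sits in cifs_match_super(), which this diff only declares):

    /* Hypothetical: are two mounts close enough to share a superblock? */
    static bool mount_flags_compatible(const struct cifs_sb_info *old_sb,
                                       int old_ms_flags,
                                       const struct cifs_sb_info *new_sb,
                                       int new_ms_flags)
    {
            if ((old_sb->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
                (new_sb->mnt_cifs_flags & CIFS_MOUNT_MASK))
                    return false;
            return (old_ms_flags & CIFS_MS_MASK) == (new_ms_flags & CIFS_MS_MASK);
    }
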
@@ -179,7 +254,7 @@ struct TCP_Server_Info {
179 struct mutex srv_mutex; 254 struct mutex srv_mutex;
180 struct task_struct *tsk; 255 struct task_struct *tsk;
181 char server_GUID[16]; 256 char server_GUID[16];
182 char secMode; 257 char sec_mode;
183 bool session_estab; /* mark when very first sess is established */ 258 bool session_estab; /* mark when very first sess is established */
184 u16 dialect; /* dialect index that server chose */ 259 u16 dialect; /* dialect index that server chose */
185 enum securityEnum secType; 260 enum securityEnum secType;
@@ -254,7 +329,7 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
254/* 329/*
255 * Session structure. One of these for each uid session with a particular host 330 * Session structure. One of these for each uid session with a particular host
256 */ 331 */
257struct cifsSesInfo { 332struct cifs_ses {
258 struct list_head smb_ses_list; 333 struct list_head smb_ses_list;
259 struct list_head tcon_list; 334 struct list_head tcon_list;
260 struct mutex session_mutex; 335 struct mutex session_mutex;
@@ -294,11 +369,11 @@ struct cifsSesInfo {
294 * there is one of these for each connection to a resource on a particular 369 * there is one of these for each connection to a resource on a particular
295 * session 370 * session
296 */ 371 */
297struct cifsTconInfo { 372struct cifs_tcon {
298 struct list_head tcon_list; 373 struct list_head tcon_list;
299 int tc_count; 374 int tc_count;
300 struct list_head openFileList; 375 struct list_head openFileList;
301 struct cifsSesInfo *ses; /* pointer to session associated with */ 376 struct cifs_ses *ses; /* pointer to session associated with */
302 char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ 377 char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
303 char *nativeFileSystem; 378 char *nativeFileSystem;
304 char *password; /* for share-level security */ 379 char *password; /* for share-level security */
@@ -380,12 +455,12 @@ struct tcon_link {
380#define TCON_LINK_IN_TREE 2 455#define TCON_LINK_IN_TREE 2
381 unsigned long tl_time; 456 unsigned long tl_time;
382 atomic_t tl_count; 457 atomic_t tl_count;
383 struct cifsTconInfo *tl_tcon; 458 struct cifs_tcon *tl_tcon;
384}; 459};
385 460
386extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb); 461extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
387 462
388static inline struct cifsTconInfo * 463static inline struct cifs_tcon *
389tlink_tcon(struct tcon_link *tlink) 464tlink_tcon(struct tcon_link *tlink)
390{ 465{
391 return tlink->tl_tcon; 466 return tlink->tl_tcon;
@@ -402,7 +477,7 @@ cifs_get_tlink(struct tcon_link *tlink)
402} 477}
403 478
404/* This function is always expected to succeed */ 479/* This function is always expected to succeed */
405extern struct cifsTconInfo *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb); 480extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
406 481
407/* 482/*
408 * This info hangs off the cifsFileInfo structure, pointed to by llist. 483 * This info hangs off the cifsFileInfo structure, pointed to by llist.
@@ -455,6 +530,14 @@ struct cifsFileInfo {
455 struct work_struct oplock_break; /* work for oplock breaks */ 530 struct work_struct oplock_break; /* work for oplock breaks */
456}; 531};
457 532
533struct cifs_io_parms {
534 __u16 netfid;
535 __u32 pid;
536 __u64 offset;
537 unsigned int length;
538 struct cifs_tcon *tcon;
539};
540
458/* 541/*
459 * Take a reference on the file private data. Must be called with 542 * Take a reference on the file private data. Must be called with
460 * cifs_file_list_lock held. 543 * cifs_file_list_lock held.
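
struct cifs_io_parms bundles everything that identifies a single read or write (tcon, open file id, pid, offset, length), which lets the read/write entry points take one parameter block instead of a long argument list; see the updated CIFSSMBRead/CIFSSMBWrite prototypes in the cifsproto.h hunks later in this patch. A caller-side sketch, assuming that updated read prototype:

    /* Sketch: issue a read through the consolidated parameter block. */
    static int read_via_io_parms(const int xid, struct cifs_tcon *tcon,
                                 __u16 netfid, __u32 pid,
                                 __u64 offset, unsigned int len)
    {
            unsigned int bytes_read = 0;
            int buf_type = CIFS_NO_BUFFER;
            char *buf = NULL;
            struct cifs_io_parms io_parms = {
                    .tcon = tcon, .netfid = netfid, .pid = pid,
                    .offset = offset, .length = len,
            };

            return CIFSSMBRead(xid, &io_parms, &bytes_read, &buf, &buf_type);
    }
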
@@ -509,10 +592,30 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
509 return '\\'; 592 return '\\';
510} 593}
511 594
595static inline void
596convert_delimiter(char *path, char delim)
597{
598 int i;
599 char old_delim;
600
601 if (path == NULL)
602 return;
603
604 if (delim == '/')
605 old_delim = '\\';
606 else
607 old_delim = '/';
608
609 for (i = 0; path[i] != '\0'; i++) {
610 if (path[i] == old_delim)
611 path[i] = delim;
612 }
613}
614
512#ifdef CONFIG_CIFS_STATS 615#ifdef CONFIG_CIFS_STATS
513#define cifs_stats_inc atomic_inc 616#define cifs_stats_inc atomic_inc
514 617
515static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon, 618static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
516 unsigned int bytes) 619 unsigned int bytes)
517{ 620{
518 if (bytes) { 621 if (bytes) {
@@ -522,7 +625,7 @@ static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
522 } 625 }
523} 626}
524 627
525static inline void cifs_stats_bytes_read(struct cifsTconInfo *tcon, 628static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
526 unsigned int bytes) 629 unsigned int bytes)
527{ 630{
528 spin_lock(&tcon->stat_lock); 631 spin_lock(&tcon->stat_lock);
@@ -543,9 +646,8 @@ struct mid_q_entry;
543 * This is the prototype for the mid callback function. When creating one, 646 * This is the prototype for the mid callback function. When creating one,
544 * take special care to avoid deadlocks. Things to bear in mind: 647 * take special care to avoid deadlocks. Things to bear in mind:
545 * 648 *
546 * - it will be called by cifsd 649 * - it will be called by cifsd, with no locks held
547 * - the GlobalMid_Lock will be held 650 * - the mid will be removed from any lists
548 * - the mid will be removed from the pending_mid_q list
549 */ 651 */
550typedef void (mid_callback_t)(struct mid_q_entry *mid); 652typedef void (mid_callback_t)(struct mid_q_entry *mid);
551 653
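
The revised comment above relaxes the callback contract: a mid_callback_t now runs in cifsd context with no locks held and with the mid already removed from any lists, so it is free to take its own locks and is responsible for freeing the mid. A hypothetical callback shape for use with the new cifs_call_async() prototype (the callback_data field name is an assumption, not shown in this hunk):

    struct my_request {
            struct completion done;
    };

    /* Hypothetical async completion callback. */
    static void my_mid_callback(struct mid_q_entry *mid)
    {
            struct my_request *req = mid->callback_data;  /* cbdata from cifs_call_async(); field name assumed */

            /* inspect the response state, hand results to 'req', then free the mid */
            complete(&req->done);
            DeleteMidQEntry(mid);
    }
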
@@ -573,7 +675,7 @@ struct mid_q_entry {
573struct oplock_q_entry { 675struct oplock_q_entry {
574 struct list_head qhead; 676 struct list_head qhead;
575 struct inode *pinode; 677 struct inode *pinode;
576 struct cifsTconInfo *tcon; 678 struct cifs_tcon *tcon;
577 __u16 netfid; 679 __u16 netfid;
578}; 680};
579 681
@@ -656,6 +758,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
656#define MID_RESPONSE_RECEIVED 4 758#define MID_RESPONSE_RECEIVED 4
657#define MID_RETRY_NEEDED 8 /* session closed while this request out */ 759#define MID_RETRY_NEEDED 8 /* session closed while this request out */
658#define MID_RESPONSE_MALFORMED 0x10 760#define MID_RESPONSE_MALFORMED 0x10
761#define MID_SHUTDOWN 0x20
659 762
660/* Types of response buffer returned from SendReceive2 */ 763/* Types of response buffer returned from SendReceive2 */
661#define CIFS_NO_BUFFER 0 /* Response buffer not returned */ 764#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
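
convert_delimiter(), added in the hunk above, rewrites every occurrence of one path separator with the other ('/' to '\\' or vice versa) in place. A small usage sketch:

    static void convert_delimiter_example(void)
    {
            char path[] = "/dir1/dir2/file.txt";

            convert_delimiter(path, '\\');  /* path is now "\dir1\dir2\file.txt" */
    }
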
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 6e69e06a30b3..953f84413c77 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -57,8 +57,9 @@ extern int init_cifs_idmap(void);
57extern void exit_cifs_idmap(void); 57extern void exit_cifs_idmap(void);
58extern void cifs_destroy_idmaptrees(void); 58extern void cifs_destroy_idmaptrees(void);
59extern char *build_path_from_dentry(struct dentry *); 59extern char *build_path_from_dentry(struct dentry *);
60extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb, 60extern char *cifs_build_path_to_root(struct smb_vol *vol,
61 struct cifsTconInfo *tcon); 61 struct cifs_sb_info *cifs_sb,
62 struct cifs_tcon *tcon);
62extern char *build_wildcard_path_from_dentry(struct dentry *direntry); 63extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
63extern char *cifs_compose_mount_options(const char *sb_mountdata, 64extern char *cifs_compose_mount_options(const char *sb_mountdata,
64 const char *fullpath, const struct dfs_info3_param *ref, 65 const char *fullpath, const struct dfs_info3_param *ref,
@@ -67,20 +68,22 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata,
67extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, 68extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
68 struct TCP_Server_Info *server); 69 struct TCP_Server_Info *server);
69extern void DeleteMidQEntry(struct mid_q_entry *midEntry); 70extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
70extern int cifs_call_async(struct TCP_Server_Info *server, 71extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
71 struct smb_hdr *in_buf, mid_callback_t *callback, 72 unsigned int nvec, mid_callback_t *callback,
72 void *cbdata); 73 void *cbdata, bool ignore_pend);
73extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *, 74extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
74 struct smb_hdr * /* input */ , 75 struct smb_hdr * /* input */ ,
75 struct smb_hdr * /* out */ , 76 struct smb_hdr * /* out */ ,
76 int * /* bytes returned */ , const int long_op); 77 int * /* bytes returned */ , const int long_op);
77extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, 78extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
78 struct smb_hdr *in_buf, int flags); 79 struct smb_hdr *in_buf, int flags);
79extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, 80extern int cifs_check_receive(struct mid_q_entry *mid,
81 struct TCP_Server_Info *server, bool log_error);
82extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
80 struct kvec *, int /* nvec to send */, 83 struct kvec *, int /* nvec to send */,
81 int * /* type of buf returned */ , const int flags); 84 int * /* type of buf returned */ , const int flags);
82extern int SendReceiveBlockingLock(const unsigned int xid, 85extern int SendReceiveBlockingLock(const unsigned int xid,
83 struct cifsTconInfo *ptcon, 86 struct cifs_tcon *ptcon,
84 struct smb_hdr *in_buf , 87 struct smb_hdr *in_buf ,
85 struct smb_hdr *out_buf, 88 struct smb_hdr *out_buf,
86 int *bytes_returned); 89 int *bytes_returned);
@@ -99,14 +102,14 @@ extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
99extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port); 102extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
100extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, 103extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
101 const unsigned short int port); 104 const unsigned short int port);
102extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); 105extern int map_smb_to_linux_error(struct smb_hdr *smb, bool logErr);
103extern void header_assemble(struct smb_hdr *, char /* command */ , 106extern void header_assemble(struct smb_hdr *, char /* command */ ,
104 const struct cifsTconInfo *, int /* length of 107 const struct cifs_tcon *, int /* length of
105 fixed section (word count) in two byte units */); 108 fixed section (word count) in two byte units */);
106extern int small_smb_init_no_tc(const int smb_cmd, const int wct, 109extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
107 struct cifsSesInfo *ses, 110 struct cifs_ses *ses,
108 void **request_buf); 111 void **request_buf);
109extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, 112extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
110 const struct nls_table *nls_cp); 113 const struct nls_table *nls_cp);
111extern __u16 GetNextMid(struct TCP_Server_Info *server); 114extern __u16 GetNextMid(struct TCP_Server_Info *server);
112extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); 115extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
@@ -148,102 +151,108 @@ extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
148extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, 151extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
149 const char *); 152 const char *);
150 153
154extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
155 struct cifs_sb_info *cifs_sb);
156extern int cifs_match_super(struct super_block *, void *);
157extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info);
158extern int cifs_setup_volume_info(struct smb_vol **pvolume_info,
159 char *mount_data, const char *devname);
151extern int cifs_mount(struct super_block *, struct cifs_sb_info *, 160extern int cifs_mount(struct super_block *, struct cifs_sb_info *,
152 const char *); 161 struct smb_vol *, const char *);
153extern int cifs_umount(struct super_block *, struct cifs_sb_info *); 162extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
154extern void cifs_dfs_release_automount_timer(void); 163extern void cifs_dfs_release_automount_timer(void);
155void cifs_proc_init(void); 164void cifs_proc_init(void);
156void cifs_proc_clean(void); 165void cifs_proc_clean(void);
157 166
158extern int cifs_negotiate_protocol(unsigned int xid, 167extern int cifs_negotiate_protocol(unsigned int xid,
159 struct cifsSesInfo *ses); 168 struct cifs_ses *ses);
160extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses, 169extern int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
161 struct nls_table *nls_info); 170 struct nls_table *nls_info);
162extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses); 171extern int CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses);
163 172
164extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, 173extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
165 const char *tree, struct cifsTconInfo *tcon, 174 const char *tree, struct cifs_tcon *tcon,
166 const struct nls_table *); 175 const struct nls_table *);
167 176
168extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, 177extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
169 const char *searchName, const struct nls_table *nls_codepage, 178 const char *searchName, const struct nls_table *nls_codepage,
170 __u16 *searchHandle, struct cifs_search_info *psrch_inf, 179 __u16 *searchHandle, struct cifs_search_info *psrch_inf,
171 int map, const char dirsep); 180 int map, const char dirsep);
172 181
173extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, 182extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
174 __u16 searchHandle, struct cifs_search_info *psrch_inf); 183 __u16 searchHandle, struct cifs_search_info *psrch_inf);
175 184
176extern int CIFSFindClose(const int, struct cifsTconInfo *tcon, 185extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
177 const __u16 search_handle); 186 const __u16 search_handle);
178 187
179extern int CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon, 188extern int CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
180 u16 netfid, FILE_ALL_INFO *pFindData); 189 u16 netfid, FILE_ALL_INFO *pFindData);
181extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, 190extern int CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
182 const unsigned char *searchName, 191 const unsigned char *searchName,
183 FILE_ALL_INFO *findData, 192 FILE_ALL_INFO *findData,
184 int legacy /* whether to use old info level */, 193 int legacy /* whether to use old info level */,
185 const struct nls_table *nls_codepage, int remap); 194 const struct nls_table *nls_codepage, int remap);
186extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon, 195extern int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
187 const unsigned char *searchName, 196 const unsigned char *searchName,
188 FILE_ALL_INFO *findData, 197 FILE_ALL_INFO *findData,
189 const struct nls_table *nls_codepage, int remap); 198 const struct nls_table *nls_codepage, int remap);
190 199
191extern int CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon, 200extern int CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
192 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData); 201 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData);
193extern int CIFSSMBUnixQPathInfo(const int xid, 202extern int CIFSSMBUnixQPathInfo(const int xid,
194 struct cifsTconInfo *tcon, 203 struct cifs_tcon *tcon,
195 const unsigned char *searchName, 204 const unsigned char *searchName,
196 FILE_UNIX_BASIC_INFO *pFindData, 205 FILE_UNIX_BASIC_INFO *pFindData,
197 const struct nls_table *nls_codepage, int remap); 206 const struct nls_table *nls_codepage, int remap);
198 207
199extern int CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses, 208extern int CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
200 const unsigned char *searchName, 209 const unsigned char *searchName,
201 struct dfs_info3_param **target_nodes, 210 struct dfs_info3_param **target_nodes,
202 unsigned int *number_of_nodes_in_array, 211 unsigned int *number_of_nodes_in_array,
203 const struct nls_table *nls_codepage, int remap); 212 const struct nls_table *nls_codepage, int remap);
204 213
205extern int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, 214extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo,
206 const char *old_path, 215 const char *old_path,
207 const struct nls_table *nls_codepage, 216 const struct nls_table *nls_codepage,
208 unsigned int *pnum_referrals, 217 unsigned int *pnum_referrals,
209 struct dfs_info3_param **preferrals, 218 struct dfs_info3_param **preferrals,
210 int remap); 219 int remap);
211extern void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, 220extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
212 struct super_block *sb, struct smb_vol *vol); 221 struct super_block *sb, struct smb_vol *vol);
213extern int CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon, 222extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon,
214 struct kstatfs *FSData); 223 struct kstatfs *FSData);
215extern int SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon, 224extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon,
216 struct kstatfs *FSData); 225 struct kstatfs *FSData);
217extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, 226extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon,
218 __u64 cap); 227 __u64 cap);
219 228
220extern int CIFSSMBQFSAttributeInfo(const int xid, 229extern int CIFSSMBQFSAttributeInfo(const int xid,
221 struct cifsTconInfo *tcon); 230 struct cifs_tcon *tcon);
222extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon); 231extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon);
223extern int CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon); 232extern int CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon);
224extern int CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon, 233extern int CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
225 struct kstatfs *FSData); 234 struct kstatfs *FSData);
226 235
227extern int CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon, 236extern int CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
228 const char *fileName, const FILE_BASIC_INFO *data, 237 const char *fileName, const FILE_BASIC_INFO *data,
229 const struct nls_table *nls_codepage, 238 const struct nls_table *nls_codepage,
230 int remap_special_chars); 239 int remap_special_chars);
231extern int CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon, 240extern int CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
232 const FILE_BASIC_INFO *data, __u16 fid, 241 const FILE_BASIC_INFO *data, __u16 fid,
233 __u32 pid_of_opener); 242 __u32 pid_of_opener);
234extern int CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon, 243extern int CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
235 bool delete_file, __u16 fid, __u32 pid_of_opener); 244 bool delete_file, __u16 fid, __u32 pid_of_opener);
236#if 0 245#if 0
237extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, 246extern int CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon,
238 char *fileName, __u16 dos_attributes, 247 char *fileName, __u16 dos_attributes,
239 const struct nls_table *nls_codepage); 248 const struct nls_table *nls_codepage);
240#endif /* possibly unneeded function */ 249#endif /* possibly unneeded function */
241extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, 250extern int CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon,
242 const char *fileName, __u64 size, 251 const char *fileName, __u64 size,
243 bool setAllocationSizeFlag, 252 bool setAllocationSizeFlag,
244 const struct nls_table *nls_codepage, 253 const struct nls_table *nls_codepage,
245 int remap_special_chars); 254 int remap_special_chars);
246extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, 255extern int CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon,
247 __u64 size, __u16 fileHandle, __u32 opener_pid, 256 __u64 size, __u16 fileHandle, __u32 opener_pid,
248 bool AllocSizeFlag); 257 bool AllocSizeFlag);
249 258
@@ -257,120 +266,116 @@ struct cifs_unix_set_info_args {
257 dev_t device; 266 dev_t device;
258}; 267};
259 268
260extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon, 269extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
261 const struct cifs_unix_set_info_args *args, 270 const struct cifs_unix_set_info_args *args,
262 u16 fid, u32 pid_of_opener); 271 u16 fid, u32 pid_of_opener);
263 272
264extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *pTcon, 273extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *pTcon,
265 char *fileName, 274 char *fileName,
266 const struct cifs_unix_set_info_args *args, 275 const struct cifs_unix_set_info_args *args,
267 const struct nls_table *nls_codepage, 276 const struct nls_table *nls_codepage,
268 int remap_special_chars); 277 int remap_special_chars);
269 278
270extern int CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon, 279extern int CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
271 const char *newName, 280 const char *newName,
272 const struct nls_table *nls_codepage, 281 const struct nls_table *nls_codepage,
273 int remap_special_chars); 282 int remap_special_chars);
274extern int CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, 283extern int CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon,
275 const char *name, const struct nls_table *nls_codepage, 284 const char *name, const struct nls_table *nls_codepage,
276 int remap_special_chars); 285 int remap_special_chars);
277extern int CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, 286extern int CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon,
278 const char *name, __u16 type, 287 const char *name, __u16 type,
279 const struct nls_table *nls_codepage, 288 const struct nls_table *nls_codepage,
280 int remap_special_chars); 289 int remap_special_chars);
281extern int CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon, 290extern int CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon,
282 const char *name, 291 const char *name,
283 const struct nls_table *nls_codepage, 292 const struct nls_table *nls_codepage,
284 int remap_special_chars); 293 int remap_special_chars);
285extern int CIFSSMBRename(const int xid, struct cifsTconInfo *tcon, 294extern int CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
286 const char *fromName, const char *toName, 295 const char *fromName, const char *toName,
287 const struct nls_table *nls_codepage, 296 const struct nls_table *nls_codepage,
288 int remap_special_chars); 297 int remap_special_chars);
289extern int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon, 298extern int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
290 int netfid, const char *target_name, 299 int netfid, const char *target_name,
291 const struct nls_table *nls_codepage, 300 const struct nls_table *nls_codepage,
292 int remap_special_chars); 301 int remap_special_chars);
293extern int CIFSCreateHardLink(const int xid, 302extern int CIFSCreateHardLink(const int xid,
294 struct cifsTconInfo *tcon, 303 struct cifs_tcon *tcon,
295 const char *fromName, const char *toName, 304 const char *fromName, const char *toName,
296 const struct nls_table *nls_codepage, 305 const struct nls_table *nls_codepage,
297 int remap_special_chars); 306 int remap_special_chars);
298extern int CIFSUnixCreateHardLink(const int xid, 307extern int CIFSUnixCreateHardLink(const int xid,
299 struct cifsTconInfo *tcon, 308 struct cifs_tcon *tcon,
300 const char *fromName, const char *toName, 309 const char *fromName, const char *toName,
301 const struct nls_table *nls_codepage, 310 const struct nls_table *nls_codepage,
302 int remap_special_chars); 311 int remap_special_chars);
303extern int CIFSUnixCreateSymLink(const int xid, 312extern int CIFSUnixCreateSymLink(const int xid,
304 struct cifsTconInfo *tcon, 313 struct cifs_tcon *tcon,
305 const char *fromName, const char *toName, 314 const char *fromName, const char *toName,
306 const struct nls_table *nls_codepage); 315 const struct nls_table *nls_codepage);
307extern int CIFSSMBUnixQuerySymLink(const int xid, 316extern int CIFSSMBUnixQuerySymLink(const int xid,
308 struct cifsTconInfo *tcon, 317 struct cifs_tcon *tcon,
309 const unsigned char *searchName, char **syminfo, 318 const unsigned char *searchName, char **syminfo,
310 const struct nls_table *nls_codepage); 319 const struct nls_table *nls_codepage);
311#ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL 320#ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL
312extern int CIFSSMBQueryReparseLinkInfo(const int xid, 321extern int CIFSSMBQueryReparseLinkInfo(const int xid,
313 struct cifsTconInfo *tcon, 322 struct cifs_tcon *tcon,
314 const unsigned char *searchName, 323 const unsigned char *searchName,
315 char *symlinkinfo, const int buflen, __u16 fid, 324 char *symlinkinfo, const int buflen, __u16 fid,
316 const struct nls_table *nls_codepage); 325 const struct nls_table *nls_codepage);
317#endif /* temporarily unused until cifs_symlink fixed */ 326#endif /* temporarily unused until cifs_symlink fixed */
318extern int CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon, 327extern int CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
319 const char *fileName, const int disposition, 328 const char *fileName, const int disposition,
320 const int access_flags, const int omode, 329 const int access_flags, const int omode,
321 __u16 *netfid, int *pOplock, FILE_ALL_INFO *, 330 __u16 *netfid, int *pOplock, FILE_ALL_INFO *,
322 const struct nls_table *nls_codepage, int remap); 331 const struct nls_table *nls_codepage, int remap);
323extern int SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon, 332extern int SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
324 const char *fileName, const int disposition, 333 const char *fileName, const int disposition,
325 const int access_flags, const int omode, 334 const int access_flags, const int omode,
326 __u16 *netfid, int *pOplock, FILE_ALL_INFO *, 335 __u16 *netfid, int *pOplock, FILE_ALL_INFO *,
327 const struct nls_table *nls_codepage, int remap); 336 const struct nls_table *nls_codepage, int remap);
328extern int CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, 337extern int CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon,
329 u32 posix_flags, __u64 mode, __u16 *netfid, 338 u32 posix_flags, __u64 mode, __u16 *netfid,
330 FILE_UNIX_BASIC_INFO *pRetData, 339 FILE_UNIX_BASIC_INFO *pRetData,
331 __u32 *pOplock, const char *name, 340 __u32 *pOplock, const char *name,
332 const struct nls_table *nls_codepage, int remap); 341 const struct nls_table *nls_codepage, int remap);
333extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, 342extern int CIFSSMBClose(const int xid, struct cifs_tcon *tcon,
334 const int smb_file_id); 343 const int smb_file_id);
335 344
336extern int CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, 345extern int CIFSSMBFlush(const int xid, struct cifs_tcon *tcon,
337 const int smb_file_id); 346 const int smb_file_id);
338 347
339extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, 348extern int CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms,
340 const int netfid, unsigned int count, 349 unsigned int *nbytes, char **buf,
341 const __u64 lseek, unsigned int *nbytes, char **buf,
342 int *return_buf_type); 350 int *return_buf_type);
343extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, 351extern int CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
344 const int netfid, const unsigned int count, 352 unsigned int *nbytes, const char *buf,
345 const __u64 lseek, unsigned int *nbytes, 353 const char __user *ubuf, const int long_op);
346 const char *buf, const char __user *ubuf, 354extern int CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
355 unsigned int *nbytes, struct kvec *iov, const int nvec,
347 const int long_op); 356 const int long_op);
348extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, 357extern int CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
349 const int netfid, const unsigned int count,
350 const __u64 offset, unsigned int *nbytes,
351 struct kvec *iov, const int nvec, const int long_op);
352extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
353 const unsigned char *searchName, __u64 *inode_number, 358 const unsigned char *searchName, __u64 *inode_number,
354 const struct nls_table *nls_codepage, 359 const struct nls_table *nls_codepage,
355 int remap_special_chars); 360 int remap_special_chars);
356 361
357extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, 362extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
358 const __u16 netfid, const __u64 len, 363 const __u16 netfid, const __u64 len,
359 const __u64 offset, const __u32 numUnlock, 364 const __u64 offset, const __u32 numUnlock,
360 const __u32 numLock, const __u8 lockType, 365 const __u32 numLock, const __u8 lockType,
361 const bool waitFlag, const __u8 oplock_level); 366 const bool waitFlag, const __u8 oplock_level);
362extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, 367extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
363 const __u16 smb_file_id, const int get_flag, 368 const __u16 smb_file_id, const int get_flag,
364 const __u64 len, struct file_lock *, 369 const __u64 len, struct file_lock *,
365 const __u16 lock_type, const bool waitFlag); 370 const __u16 lock_type, const bool waitFlag);
366extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon); 371extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon);
367extern int CIFSSMBEcho(struct TCP_Server_Info *server); 372extern int CIFSSMBEcho(struct TCP_Server_Info *server);
368extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses); 373extern int CIFSSMBLogoff(const int xid, struct cifs_ses *ses);
369 374
370extern struct cifsSesInfo *sesInfoAlloc(void); 375extern struct cifs_ses *sesInfoAlloc(void);
371extern void sesInfoFree(struct cifsSesInfo *); 376extern void sesInfoFree(struct cifs_ses *);
372extern struct cifsTconInfo *tconInfoAlloc(void); 377extern struct cifs_tcon *tconInfoAlloc(void);
373extern void tconInfoFree(struct cifsTconInfo *); 378extern void tconInfoFree(struct cifs_tcon *);
374 379
375extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *); 380extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
376extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, 381extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
@@ -379,51 +384,51 @@ extern int cifs_verify_signature(struct smb_hdr *,
379 struct TCP_Server_Info *server, 384 struct TCP_Server_Info *server,
380 __u32 expected_sequence_number); 385 __u32 expected_sequence_number);
381extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *); 386extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
382extern int setup_ntlm_response(struct cifsSesInfo *); 387extern int setup_ntlm_response(struct cifs_ses *);
383extern int setup_ntlmv2_rsp(struct cifsSesInfo *, const struct nls_table *); 388extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *);
384extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *); 389extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
385extern void cifs_crypto_shash_release(struct TCP_Server_Info *); 390extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
386extern int calc_seckey(struct cifsSesInfo *); 391extern int calc_seckey(struct cifs_ses *);
387 392
388#ifdef CONFIG_CIFS_WEAK_PW_HASH 393#ifdef CONFIG_CIFS_WEAK_PW_HASH
389extern int calc_lanman_hash(const char *password, const char *cryptkey, 394extern int calc_lanman_hash(const char *password, const char *cryptkey,
390 bool encrypt, char *lnm_session_key); 395 bool encrypt, char *lnm_session_key);
391#endif /* CIFS_WEAK_PW_HASH */ 396#endif /* CIFS_WEAK_PW_HASH */
392#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */ 397#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
393extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, 398extern int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
394 const int notify_subdirs, const __u16 netfid, 399 const int notify_subdirs, const __u16 netfid,
395 __u32 filter, struct file *file, int multishot, 400 __u32 filter, struct file *file, int multishot,
396 const struct nls_table *nls_codepage); 401 const struct nls_table *nls_codepage);
397#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */ 402#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
398extern int CIFSSMBCopy(int xid, 403extern int CIFSSMBCopy(int xid,
399 struct cifsTconInfo *source_tcon, 404 struct cifs_tcon *source_tcon,
400 const char *fromName, 405 const char *fromName,
401 const __u16 target_tid, 406 const __u16 target_tid,
402 const char *toName, const int flags, 407 const char *toName, const int flags,
403 const struct nls_table *nls_codepage, 408 const struct nls_table *nls_codepage,
404 int remap_special_chars); 409 int remap_special_chars);
405extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon, 410extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
406 const unsigned char *searchName, 411 const unsigned char *searchName,
407 const unsigned char *ea_name, char *EAData, 412 const unsigned char *ea_name, char *EAData,
408 size_t bufsize, const struct nls_table *nls_codepage, 413 size_t bufsize, const struct nls_table *nls_codepage,
409 int remap_special_chars); 414 int remap_special_chars);
410extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, 415extern int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon,
411 const char *fileName, const char *ea_name, 416 const char *fileName, const char *ea_name,
412 const void *ea_value, const __u16 ea_value_len, 417 const void *ea_value, const __u16 ea_value_len,
413 const struct nls_table *nls_codepage, int remap_special_chars); 418 const struct nls_table *nls_codepage, int remap_special_chars);
414extern int CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, 419extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon,
415 __u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen); 420 __u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
416extern int CIFSSMBSetCIFSACL(const int, struct cifsTconInfo *, __u16, 421extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16,
417 struct cifs_ntsd *, __u32); 422 struct cifs_ntsd *, __u32);
418extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon, 423extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
419 const unsigned char *searchName, 424 const unsigned char *searchName,
420 char *acl_inf, const int buflen, const int acl_type, 425 char *acl_inf, const int buflen, const int acl_type,
421 const struct nls_table *nls_codepage, int remap_special_chars); 426 const struct nls_table *nls_codepage, int remap_special_chars);
422extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon, 427extern int CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
423 const unsigned char *fileName, 428 const unsigned char *fileName,
424 const char *local_acl, const int buflen, const int acl_type, 429 const char *local_acl, const int buflen, const int acl_type,
425 const struct nls_table *nls_codepage, int remap_special_chars); 430 const struct nls_table *nls_codepage, int remap_special_chars);
426extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon, 431extern int CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
427 const int netfid, __u64 *pExtAttrBits, __u64 *pMask); 432 const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
428extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); 433extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
429extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr); 434extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
@@ -434,4 +439,22 @@ extern int mdfour(unsigned char *, unsigned char *, int);
434extern int E_md4hash(const unsigned char *passwd, unsigned char *p16); 439extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
435extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8, 440extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
436 unsigned char *p24); 441 unsigned char *p24);
442
443/* asynchronous write support */
444struct cifs_writedata {
445 struct kref refcount;
446 enum writeback_sync_modes sync_mode;
447 struct work_struct work;
448 struct cifsFileInfo *cfile;
449 __u64 offset;
450 unsigned int bytes;
451 int result;
452 unsigned int nr_pages;
453 struct page *pages[1];
454};
455
456int cifs_async_writev(struct cifs_writedata *wdata);
457struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages);
458void cifs_writedata_release(struct kref *refcount);
459
437#endif /* _CIFSPROTO_H */ 460#endif /* _CIFSPROTO_H */
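
The declarations above complete the new asynchronous write interface: a caller allocates a cifs_writedata sized for its page count, fills in the open file, offset and page pointers, and submits it with cifs_async_writev(); the final kref_put() through cifs_writedata_release() frees it. A minimal caller sketch follows — illustrative only, not taken from the patch, and assuming the cifs-internal headers plus pages that are already locked, under writeback, and each holding a reference that the completion work releases:

/* Illustrative sketch, not part of the patch. Assumes pages[] are locked,
 * under writeback, and each carry a reference dropped by the completion
 * work; cfile carries a reference that cifs_writedata_release() drops. */
static int example_async_flush(struct cifsFileInfo *cfile, struct page **pages,
			       unsigned int nr_pages, __u64 offset)
{
	struct cifs_writedata *wdata;
	unsigned int i;
	int rc;

	wdata = cifs_writedata_alloc(nr_pages);
	if (wdata == NULL)
		return -ENOMEM;

	wdata->sync_mode = WB_SYNC_ALL;
	wdata->cfile = cfile;		/* reference consumed on release */
	wdata->offset = offset;
	wdata->nr_pages = nr_pages;
	for (i = 0; i < nr_pages; i++)
		wdata->pages[i] = pages[i];

	do {
		rc = cifs_async_writev(wdata);
	} while (rc == -EAGAIN);

	/* drop the allocation reference; the send path holds its own */
	kref_put(&wdata->refcount, cifs_writedata_release);
	return rc;
}

The -EAGAIN retry loop mirrors cifs_writev_requeue() in the cifssmb.c hunks below; wdata->bytes is filled in by cifs_async_writev() itself from the page offsets and inode size.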
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 83df937b814e..1a9fe7f816d1 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -32,6 +32,7 @@
32#include <linux/vfs.h> 32#include <linux/vfs.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/posix_acl_xattr.h> 34#include <linux/posix_acl_xattr.h>
35#include <linux/pagemap.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36#include "cifspdu.h" 37#include "cifspdu.h"
37#include "cifsglob.h" 38#include "cifsglob.h"
@@ -84,7 +85,7 @@ static struct {
84 85
85/* Mark as invalid, all open files on tree connections since they 86/* Mark as invalid, all open files on tree connections since they
86 were closed when session to server was lost */ 87 were closed when session to server was lost */
87static void mark_open_files_invalid(struct cifsTconInfo *pTcon) 88static void mark_open_files_invalid(struct cifs_tcon *pTcon)
88{ 89{
89 struct cifsFileInfo *open_file = NULL; 90 struct cifsFileInfo *open_file = NULL;
90 struct list_head *tmp; 91 struct list_head *tmp;
@@ -104,10 +105,10 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
104 105
105/* reconnect the socket, tcon, and smb session if needed */ 106/* reconnect the socket, tcon, and smb session if needed */
106static int 107static int
107cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) 108cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
108{ 109{
109 int rc = 0; 110 int rc = 0;
110 struct cifsSesInfo *ses; 111 struct cifs_ses *ses;
111 struct TCP_Server_Info *server; 112 struct TCP_Server_Info *server;
112 struct nls_table *nls_codepage; 113 struct nls_table *nls_codepage;
113 114
@@ -226,7 +227,7 @@ out:
226 SMB information in the SMB header. If the return code is zero, this 227 SMB information in the SMB header. If the return code is zero, this
227 function must have filled in request_buf pointer */ 228 function must have filled in request_buf pointer */
228static int 229static int
229small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 230small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
230 void **request_buf) 231 void **request_buf)
231{ 232{
232 int rc; 233 int rc;
@@ -252,7 +253,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
252 253
253int 254int
254small_smb_init_no_tc(const int smb_command, const int wct, 255small_smb_init_no_tc(const int smb_command, const int wct,
255 struct cifsSesInfo *ses, void **request_buf) 256 struct cifs_ses *ses, void **request_buf)
256{ 257{
257 int rc; 258 int rc;
258 struct smb_hdr *buffer; 259 struct smb_hdr *buffer;
@@ -278,7 +279,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
278 279
279/* If the return code is zero, this function must fill in request_buf pointer */ 280/* If the return code is zero, this function must fill in request_buf pointer */
280static int 281static int
281__smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 282__smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
282 void **request_buf, void **response_buf) 283 void **request_buf, void **response_buf)
283{ 284{
284 *request_buf = cifs_buf_get(); 285 *request_buf = cifs_buf_get();
@@ -304,7 +305,7 @@ __smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
304 305
305/* If the return code is zero, this function must fill in request_buf pointer */ 306/* If the return code is zero, this function must fill in request_buf pointer */
306static int 307static int
307smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, 308smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
308 void **request_buf, void **response_buf) 309 void **request_buf, void **response_buf)
309{ 310{
310 int rc; 311 int rc;
@@ -317,7 +318,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
317} 318}
318 319
319static int 320static int
320smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon, 321smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon,
321 void **request_buf, void **response_buf) 322 void **request_buf, void **response_buf)
322{ 323{
323 if (tcon->ses->need_reconnect || tcon->need_reconnect) 324 if (tcon->ses->need_reconnect || tcon->need_reconnect)
@@ -366,7 +367,7 @@ static inline void inc_rfc1001_len(void *pSMB, int count)
366} 367}
367 368
368int 369int
369CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) 370CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
370{ 371{
371 NEGOTIATE_REQ *pSMB; 372 NEGOTIATE_REQ *pSMB;
372 NEGOTIATE_RSP *pSMBr; 373 NEGOTIATE_RSP *pSMBr;
@@ -450,7 +451,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
450 rc = -EOPNOTSUPP; 451 rc = -EOPNOTSUPP;
451 goto neg_err_exit; 452 goto neg_err_exit;
452 } 453 }
453 server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode); 454 server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
454 server->maxReq = le16_to_cpu(rsp->MaxMpxCount); 455 server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
455 server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize), 456 server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
456 (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); 457 (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
@@ -504,7 +505,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
504 cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) { 505 cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
505 memcpy(ses->server->cryptkey, rsp->EncryptionKey, 506 memcpy(ses->server->cryptkey, rsp->EncryptionKey,
506 CIFS_CRYPTO_KEY_SIZE); 507 CIFS_CRYPTO_KEY_SIZE);
507 } else if (server->secMode & SECMODE_PW_ENCRYPT) { 508 } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
508 rc = -EIO; /* need cryptkey unless plain text */ 509 rc = -EIO; /* need cryptkey unless plain text */
509 goto neg_err_exit; 510 goto neg_err_exit;
510 } 511 }
@@ -526,11 +527,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
526 goto neg_err_exit; 527 goto neg_err_exit;
527 } 528 }
528 /* else wct == 17 NTLM */ 529 /* else wct == 17 NTLM */
529 server->secMode = pSMBr->SecurityMode; 530 server->sec_mode = pSMBr->SecurityMode;
530 if ((server->secMode & SECMODE_USER) == 0) 531 if ((server->sec_mode & SECMODE_USER) == 0)
531 cFYI(1, "share mode security"); 532 cFYI(1, "share mode security");
532 533
533 if ((server->secMode & SECMODE_PW_ENCRYPT) == 0) 534 if ((server->sec_mode & SECMODE_PW_ENCRYPT) == 0)
534#ifdef CONFIG_CIFS_WEAK_PW_HASH 535#ifdef CONFIG_CIFS_WEAK_PW_HASH
535 if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0) 536 if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
536#endif /* CIFS_WEAK_PW_HASH */ 537#endif /* CIFS_WEAK_PW_HASH */
@@ -570,18 +571,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
570 if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { 571 if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
571 memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey, 572 memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
572 CIFS_CRYPTO_KEY_SIZE); 573 CIFS_CRYPTO_KEY_SIZE);
573 } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) 574 } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
574 && (pSMBr->EncryptionKeyLength == 0)) { 575 server->capabilities & CAP_EXTENDED_SECURITY) &&
576 (pSMBr->EncryptionKeyLength == 0)) {
575 /* decode security blob */ 577 /* decode security blob */
576 } else if (server->secMode & SECMODE_PW_ENCRYPT) {
577 rc = -EIO; /* no crypt key only if plain text pwd */
578 goto neg_err_exit;
579 }
580
581 /* BB might be helpful to save off the domain of server here */
582
583 if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
584 (server->capabilities & CAP_EXTENDED_SECURITY)) {
585 count = get_bcc(&pSMBr->hdr); 578 count = get_bcc(&pSMBr->hdr);
586 if (count < 16) { 579 if (count < 16) {
587 rc = -EIO; 580 rc = -EIO;
@@ -624,6 +617,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
624 } else 617 } else
625 rc = -EOPNOTSUPP; 618 rc = -EOPNOTSUPP;
626 } 619 }
620 } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
621 rc = -EIO; /* no crypt key only if plain text pwd */
622 goto neg_err_exit;
627 } else 623 } else
628 server->capabilities &= ~CAP_EXTENDED_SECURITY; 624 server->capabilities &= ~CAP_EXTENDED_SECURITY;
629 625
@@ -634,27 +630,27 @@ signing_check:
634 /* MUST_SIGN already includes the MAY_SIGN FLAG 630 /* MUST_SIGN already includes the MAY_SIGN FLAG
635 so if this is zero it means that signing is disabled */ 631 so if this is zero it means that signing is disabled */
636 cFYI(1, "Signing disabled"); 632 cFYI(1, "Signing disabled");
637 if (server->secMode & SECMODE_SIGN_REQUIRED) { 633 if (server->sec_mode & SECMODE_SIGN_REQUIRED) {
638 cERROR(1, "Server requires " 634 cERROR(1, "Server requires "
639 "packet signing to be enabled in " 635 "packet signing to be enabled in "
640 "/proc/fs/cifs/SecurityFlags."); 636 "/proc/fs/cifs/SecurityFlags.");
641 rc = -EOPNOTSUPP; 637 rc = -EOPNOTSUPP;
642 } 638 }
643 server->secMode &= 639 server->sec_mode &=
644 ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); 640 ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
645 } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) { 641 } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
646 /* signing required */ 642 /* signing required */
647 cFYI(1, "Must sign - secFlags 0x%x", secFlags); 643 cFYI(1, "Must sign - secFlags 0x%x", secFlags);
648 if ((server->secMode & 644 if ((server->sec_mode &
649 (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) { 645 (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
650 cERROR(1, "signing required but server lacks support"); 646 cERROR(1, "signing required but server lacks support");
651 rc = -EOPNOTSUPP; 647 rc = -EOPNOTSUPP;
652 } else 648 } else
653 server->secMode |= SECMODE_SIGN_REQUIRED; 649 server->sec_mode |= SECMODE_SIGN_REQUIRED;
654 } else { 650 } else {
655 /* signing optional ie CIFSSEC_MAY_SIGN */ 651 /* signing optional ie CIFSSEC_MAY_SIGN */
656 if ((server->secMode & SECMODE_SIGN_REQUIRED) == 0) 652 if ((server->sec_mode & SECMODE_SIGN_REQUIRED) == 0)
657 server->secMode &= 653 server->sec_mode &=
658 ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); 654 ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
659 } 655 }
660 656
@@ -666,7 +662,7 @@ neg_err_exit:
666} 662}
667 663
668int 664int
669CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon) 665CIFSSMBTDis(const int xid, struct cifs_tcon *tcon)
670{ 666{
671 struct smb_hdr *smb_buffer; 667 struct smb_hdr *smb_buffer;
672 int rc = 0; 668 int rc = 0;
@@ -725,6 +721,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
725{ 721{
726 ECHO_REQ *smb; 722 ECHO_REQ *smb;
727 int rc = 0; 723 int rc = 0;
724 struct kvec iov;
728 725
729 cFYI(1, "In echo request"); 726 cFYI(1, "In echo request");
730 727
@@ -739,9 +736,10 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
739 put_bcc(1, &smb->hdr); 736 put_bcc(1, &smb->hdr);
740 smb->Data[0] = 'a'; 737 smb->Data[0] = 'a';
741 inc_rfc1001_len(smb, 3); 738 inc_rfc1001_len(smb, 3);
739 iov.iov_base = smb;
740 iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
742 741
743 rc = cifs_call_async(server, (struct smb_hdr *)smb, 742 rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true);
744 cifs_echo_callback, server);
745 if (rc) 743 if (rc)
746 cFYI(1, "Echo request failed: %d", rc); 744 cFYI(1, "Echo request failed: %d", rc);
747 745
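
The echo conversion above shows the reworked transport call: the request is wrapped in a kvec whose length adds the 4-byte RFC1001 header, and cifs_call_async() now takes an iov array, a vector count, a completion callback with private data, and a final flag (true here, false in cifs_async_writev() below). A sketch of that pattern — the mid_callback_t type and the flag's meaning (assumed to let the send bypass the pending-request limit) are taken on trust from the surrounding code, not from this hunk:

/* Illustrative only -- mirrors the call pattern introduced above. */
static int example_send_async(struct TCP_Server_Info *server,
			      struct smb_hdr *out_buf,
			      mid_callback_t *callback, void *cbdata)
{
	struct kvec iov;

	iov.iov_base = out_buf;
	/* smb_buf_length is big-endian and excludes the 4-byte RFC1001 header */
	iov.iov_len = be32_to_cpu(out_buf->smb_buf_length) + 4;

	return cifs_call_async(server, &iov, 1, callback, cbdata,
			       true /* as in CIFSSMBEcho() */);
}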
@@ -751,7 +749,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
751} 749}
752 750
753int 751int
754CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) 752CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
755{ 753{
756 LOGOFF_ANDX_REQ *pSMB; 754 LOGOFF_ANDX_REQ *pSMB;
757 int rc = 0; 755 int rc = 0;
@@ -778,7 +776,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
778 776
779 pSMB->hdr.Mid = GetNextMid(ses->server); 777 pSMB->hdr.Mid = GetNextMid(ses->server);
780 778
781 if (ses->server->secMode & 779 if (ses->server->sec_mode &
782 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 780 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
783 pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 781 pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
784 782
@@ -798,7 +796,7 @@ session_already_dead:
798} 796}
799 797
800int 798int
801CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName, 799CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
802 __u16 type, const struct nls_table *nls_codepage, int remap) 800 __u16 type, const struct nls_table *nls_codepage, int remap)
803{ 801{
804 TRANSACTION2_SPI_REQ *pSMB = NULL; 802 TRANSACTION2_SPI_REQ *pSMB = NULL;
@@ -873,7 +871,7 @@ PsxDelete:
873} 871}
874 872
875int 873int
876CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName, 874CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
877 const struct nls_table *nls_codepage, int remap) 875 const struct nls_table *nls_codepage, int remap)
878{ 876{
879 DELETE_FILE_REQ *pSMB = NULL; 877 DELETE_FILE_REQ *pSMB = NULL;
@@ -918,7 +916,7 @@ DelFileRetry:
918} 916}
919 917
920int 918int
921CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName, 919CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon, const char *dirName,
922 const struct nls_table *nls_codepage, int remap) 920 const struct nls_table *nls_codepage, int remap)
923{ 921{
924 DELETE_DIRECTORY_REQ *pSMB = NULL; 922 DELETE_DIRECTORY_REQ *pSMB = NULL;
@@ -961,7 +959,7 @@ RmDirRetry:
961} 959}
962 960
963int 961int
964CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon, 962CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
965 const char *name, const struct nls_table *nls_codepage, int remap) 963 const char *name, const struct nls_table *nls_codepage, int remap)
966{ 964{
967 int rc = 0; 965 int rc = 0;
@@ -1004,7 +1002,7 @@ MkDirRetry:
1004} 1002}
1005 1003
1006int 1004int
1007CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags, 1005CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon, __u32 posix_flags,
1008 __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData, 1006 __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData,
1009 __u32 *pOplock, const char *name, 1007 __u32 *pOplock, const char *name,
1010 const struct nls_table *nls_codepage, int remap) 1008 const struct nls_table *nls_codepage, int remap)
@@ -1170,7 +1168,7 @@ access_flags_to_smbopen_mode(const int access_flags)
1170} 1168}
1171 1169
1172int 1170int
1173SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon, 1171SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
1174 const char *fileName, const int openDisposition, 1172 const char *fileName, const int openDisposition,
1175 const int access_flags, const int create_options, __u16 *netfid, 1173 const int access_flags, const int create_options, __u16 *netfid,
1176 int *pOplock, FILE_ALL_INFO *pfile_info, 1174 int *pOplock, FILE_ALL_INFO *pfile_info,
@@ -1277,7 +1275,7 @@ OldOpenRetry:
1277} 1275}
1278 1276
1279int 1277int
1280CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon, 1278CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
1281 const char *fileName, const int openDisposition, 1279 const char *fileName, const int openDisposition,
1282 const int access_flags, const int create_options, __u16 *netfid, 1280 const int access_flags, const int create_options, __u16 *netfid,
1283 int *pOplock, FILE_ALL_INFO *pfile_info, 1281 int *pOplock, FILE_ALL_INFO *pfile_info,
@@ -1379,8 +1377,7 @@ openRetry:
1379} 1377}
1380 1378
1381int 1379int
1382CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, 1380CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
1383 const unsigned int count, const __u64 lseek, unsigned int *nbytes,
1384 char **buf, int *pbuf_type) 1381 char **buf, int *pbuf_type)
1385{ 1382{
1386 int rc = -EACCES; 1383 int rc = -EACCES;
@@ -1390,13 +1387,18 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
1390 int wct; 1387 int wct;
1391 int resp_buf_type = 0; 1388 int resp_buf_type = 0;
1392 struct kvec iov[1]; 1389 struct kvec iov[1];
1390 __u32 pid = io_parms->pid;
1391 __u16 netfid = io_parms->netfid;
1392 __u64 offset = io_parms->offset;
1393 struct cifs_tcon *tcon = io_parms->tcon;
1394 unsigned int count = io_parms->length;
1393 1395
1394 cFYI(1, "Reading %d bytes on fid %d", count, netfid); 1396 cFYI(1, "Reading %d bytes on fid %d", count, netfid);
1395 if (tcon->ses->capabilities & CAP_LARGE_FILES) 1397 if (tcon->ses->capabilities & CAP_LARGE_FILES)
1396 wct = 12; 1398 wct = 12;
1397 else { 1399 else {
1398 wct = 10; /* old style read */ 1400 wct = 10; /* old style read */
1399 if ((lseek >> 32) > 0) { 1401 if ((offset >> 32) > 0) {
1400 /* can not handle this big offset for old */ 1402 /* can not handle this big offset for old */
1401 return -EIO; 1403 return -EIO;
1402 } 1404 }
@@ -1407,15 +1409,18 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
1407 if (rc) 1409 if (rc)
1408 return rc; 1410 return rc;
1409 1411
1412 pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
1413 pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
1414
1410 /* tcon and ses pointer are checked in smb_init */ 1415 /* tcon and ses pointer are checked in smb_init */
1411 if (tcon->ses->server == NULL) 1416 if (tcon->ses->server == NULL)
1412 return -ECONNABORTED; 1417 return -ECONNABORTED;
1413 1418
1414 pSMB->AndXCommand = 0xFF; /* none */ 1419 pSMB->AndXCommand = 0xFF; /* none */
1415 pSMB->Fid = netfid; 1420 pSMB->Fid = netfid;
1416 pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF); 1421 pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
1417 if (wct == 12) 1422 if (wct == 12)
1418 pSMB->OffsetHigh = cpu_to_le32(lseek >> 32); 1423 pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
1419 1424
1420 pSMB->Remaining = 0; 1425 pSMB->Remaining = 0;
1421 pSMB->MaxCount = cpu_to_le16(count & 0xFFFF); 1426 pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
@@ -1484,9 +1489,8 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
1484 1489
1485 1490
1486int 1491int
1487CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, 1492CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
1488 const int netfid, const unsigned int count, 1493 unsigned int *nbytes, const char *buf,
1489 const __u64 offset, unsigned int *nbytes, const char *buf,
1490 const char __user *ubuf, const int long_op) 1494 const char __user *ubuf, const int long_op)
1491{ 1495{
1492 int rc = -EACCES; 1496 int rc = -EACCES;
@@ -1495,6 +1499,11 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
1495 int bytes_returned, wct; 1499 int bytes_returned, wct;
1496 __u32 bytes_sent; 1500 __u32 bytes_sent;
1497 __u16 byte_count; 1501 __u16 byte_count;
1502 __u32 pid = io_parms->pid;
1503 __u16 netfid = io_parms->netfid;
1504 __u64 offset = io_parms->offset;
1505 struct cifs_tcon *tcon = io_parms->tcon;
1506 unsigned int count = io_parms->length;
1498 1507
1499 *nbytes = 0; 1508 *nbytes = 0;
1500 1509
@@ -1516,6 +1525,10 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
1516 (void **) &pSMBr); 1525 (void **) &pSMBr);
1517 if (rc) 1526 if (rc)
1518 return rc; 1527 return rc;
1528
1529 pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
1530 pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
1531
1519 /* tcon and ses pointer are checked in smb_init */ 1532 /* tcon and ses pointer are checked in smb_init */
1520 if (tcon->ses->server == NULL) 1533 if (tcon->ses->server == NULL)
1521 return -ECONNABORTED; 1534 return -ECONNABORTED;
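
The read and write hunks above replace the separate tcon/netfid/offset/count arguments with a single struct cifs_io_parms; the pid they unpack is stamped into hdr.Pid/PidHigh so the request is issued on behalf of the opening process. A hedged call-site sketch — the struct is assumed to be declared in cifsglob.h with exactly the fields unpacked above, and the helper's names are illustrative, not from the patch:

/* Illustrative only -- shows how a converted caller bundles the parameters. */
static int example_sync_write(const int xid, struct cifs_tcon *tcon,
			      __u16 netfid, __u32 pid, __u64 offset,
			      unsigned int len, const char *data,
			      unsigned int *written)
{
	struct cifs_io_parms io_parms;

	io_parms.tcon   = tcon;
	io_parms.netfid = netfid;
	io_parms.pid    = pid;	/* stamped into hdr.Pid/PidHigh by CIFSSMBWrite */
	io_parms.offset = offset;
	io_parms.length = len;

	return CIFSSMBWrite(xid, &io_parms, written, data,
			    NULL /* ubuf */, 0 /* long_op */);
}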
@@ -1602,17 +1615,259 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
1602 return rc; 1615 return rc;
1603} 1616}
1604 1617
1618void
1619cifs_writedata_release(struct kref *refcount)
1620{
1621 struct cifs_writedata *wdata = container_of(refcount,
1622 struct cifs_writedata, refcount);
1623
1624 if (wdata->cfile)
1625 cifsFileInfo_put(wdata->cfile);
1626
1627 kfree(wdata);
1628}
1629
1630/*
1631 * Write failed with a retryable error. Resend the write request. It's also
1632 * possible that the page was redirtied so re-clean the page.
1633 */
1634static void
1635cifs_writev_requeue(struct cifs_writedata *wdata)
1636{
1637 int i, rc;
1638 struct inode *inode = wdata->cfile->dentry->d_inode;
1639
1640 for (i = 0; i < wdata->nr_pages; i++) {
1641 lock_page(wdata->pages[i]);
1642 clear_page_dirty_for_io(wdata->pages[i]);
1643 }
1644
1645 do {
1646 rc = cifs_async_writev(wdata);
1647 } while (rc == -EAGAIN);
1648
1649 for (i = 0; i < wdata->nr_pages; i++) {
1650 if (rc != 0)
1651 SetPageError(wdata->pages[i]);
1652 unlock_page(wdata->pages[i]);
1653 }
1654
1655 mapping_set_error(inode->i_mapping, rc);
1656 kref_put(&wdata->refcount, cifs_writedata_release);
1657}
1658
1659static void
1660cifs_writev_complete(struct work_struct *work)
1661{
1662 struct cifs_writedata *wdata = container_of(work,
1663 struct cifs_writedata, work);
1664 struct inode *inode = wdata->cfile->dentry->d_inode;
1665 int i = 0;
1666
1667 if (wdata->result == 0) {
1668 cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
1669 cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
1670 wdata->bytes);
1671 } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
1672 return cifs_writev_requeue(wdata);
1673
1674 for (i = 0; i < wdata->nr_pages; i++) {
1675 struct page *page = wdata->pages[i];
1676 if (wdata->result == -EAGAIN)
1677 __set_page_dirty_nobuffers(page);
1678 else if (wdata->result < 0)
1679 SetPageError(page);
1680 end_page_writeback(page);
1681 page_cache_release(page);
1682 }
1683 if (wdata->result != -EAGAIN)
1684 mapping_set_error(inode->i_mapping, wdata->result);
1685 kref_put(&wdata->refcount, cifs_writedata_release);
1686}
1687
1688struct cifs_writedata *
1689cifs_writedata_alloc(unsigned int nr_pages)
1690{
1691 struct cifs_writedata *wdata;
1692
1693 /* this would overflow */
1694 if (nr_pages == 0) {
1695 cERROR(1, "%s: called with nr_pages == 0!", __func__);
1696 return NULL;
1697 }
1698
1699 /* writedata + number of page pointers */
1700 wdata = kzalloc(sizeof(*wdata) +
1701 sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
1702 if (wdata != NULL) {
1703 INIT_WORK(&wdata->work, cifs_writev_complete);
1704 kref_init(&wdata->refcount);
1705 }
1706 return wdata;
1707}
1708
1709/*
1710 * Check the midState and signature on received buffer (if any), and queue the
1711 * workqueue completion task.
1712 */
1713static void
1714cifs_writev_callback(struct mid_q_entry *mid)
1715{
1716 struct cifs_writedata *wdata = mid->callback_data;
1717 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1718 unsigned int written;
1719 WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
1720
1721 switch (mid->midState) {
1722 case MID_RESPONSE_RECEIVED:
1723 wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
1724 if (wdata->result != 0)
1725 break;
1726
1727 written = le16_to_cpu(smb->CountHigh);
1728 written <<= 16;
1729 written += le16_to_cpu(smb->Count);
1730 /*
1731 * Mask off high 16 bits when bytes written as returned
1732 * by the server is greater than bytes requested by the
1733 * client. OS/2 servers are known to set incorrect
1734 * CountHigh values.
1735 */
1736 if (written > wdata->bytes)
1737 written &= 0xFFFF;
1738
1739 if (written < wdata->bytes)
1740 wdata->result = -ENOSPC;
1741 else
1742 wdata->bytes = written;
1743 break;
1744 case MID_REQUEST_SUBMITTED:
1745 case MID_RETRY_NEEDED:
1746 wdata->result = -EAGAIN;
1747 break;
1748 default:
1749 wdata->result = -EIO;
1750 break;
1751 }
1752
1753 queue_work(system_nrt_wq, &wdata->work);
1754 DeleteMidQEntry(mid);
1755 atomic_dec(&tcon->ses->server->inFlight);
1756 wake_up(&tcon->ses->server->request_q);
1757}
1758
1759/* cifs_async_writev - send an async write, and set up mid to handle result */
1760int
1761cifs_async_writev(struct cifs_writedata *wdata)
1762{
1763 int i, rc = -EACCES;
1764 WRITE_REQ *smb = NULL;
1765 int wct;
1766 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1767 struct inode *inode = wdata->cfile->dentry->d_inode;
1768 struct kvec *iov = NULL;
1769
1770 if (tcon->ses->capabilities & CAP_LARGE_FILES) {
1771 wct = 14;
1772 } else {
1773 wct = 12;
1774 if (wdata->offset >> 32 > 0) {
1775 /* can not handle big offset for old srv */
1776 return -EIO;
1777 }
1778 }
1779
1780 rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb);
1781 if (rc)
1782 goto async_writev_out;
1783
1784 /* 1 iov per page + 1 for header */
1785 iov = kzalloc((wdata->nr_pages + 1) * sizeof(*iov), GFP_NOFS);
1786 if (iov == NULL) {
1787 rc = -ENOMEM;
1788 goto async_writev_out;
1789 }
1790
1791 smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid);
1792 smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16));
1793
1794 smb->AndXCommand = 0xFF; /* none */
1795 smb->Fid = wdata->cfile->netfid;
1796 smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF);
1797 if (wct == 14)
1798 smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32);
1799 smb->Reserved = 0xFFFFFFFF;
1800 smb->WriteMode = 0;
1801 smb->Remaining = 0;
1802
1803 smb->DataOffset =
1804 cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
1805
1806 /* 4 for RFC1001 length + 1 for BCC */
1807 iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1;
1808 iov[0].iov_base = smb;
1809
1810 /* marshal up the pages into iov array */
1811 wdata->bytes = 0;
1812 for (i = 0; i < wdata->nr_pages; i++) {
1813 iov[i + 1].iov_len = min(inode->i_size -
1814 page_offset(wdata->pages[i]),
1815 (loff_t)PAGE_CACHE_SIZE);
1816 iov[i + 1].iov_base = kmap(wdata->pages[i]);
1817 wdata->bytes += iov[i + 1].iov_len;
1818 }
1819
1820 cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
1821
1822 smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF);
1823 smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16);
1824
1825 if (wct == 14) {
1826 inc_rfc1001_len(&smb->hdr, wdata->bytes + 1);
1827 put_bcc(wdata->bytes + 1, &smb->hdr);
1828 } else {
1829 /* wct == 12 */
1830 struct smb_com_writex_req *smbw =
1831 (struct smb_com_writex_req *)smb;
1832 inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5);
1833 put_bcc(wdata->bytes + 5, &smbw->hdr);
1834 iov[0].iov_len += 4; /* pad bigger by four bytes */
1835 }
1836
1837 kref_get(&wdata->refcount);
1838 rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
1839 cifs_writev_callback, wdata, false);
1840
1841 if (rc == 0)
1842 cifs_stats_inc(&tcon->num_writes);
1843 else
1844 kref_put(&wdata->refcount, cifs_writedata_release);
1845
1846 /* send is done, unmap pages */
1847 for (i = 0; i < wdata->nr_pages; i++)
1848 kunmap(wdata->pages[i]);
1849
1850async_writev_out:
1851 cifs_small_buf_release(smb);
1852 kfree(iov);
1853 return rc;
1854}
1855
1605int 1856int
1606CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, 1857CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
1607 const int netfid, const unsigned int count, 1858 unsigned int *nbytes, struct kvec *iov, int n_vec,
1608 const __u64 offset, unsigned int *nbytes, struct kvec *iov, 1859 const int long_op)
1609 int n_vec, const int long_op)
1610{ 1860{
1611 int rc = -EACCES; 1861 int rc = -EACCES;
1612 WRITE_REQ *pSMB = NULL; 1862 WRITE_REQ *pSMB = NULL;
1613 int wct; 1863 int wct;
1614 int smb_hdr_len; 1864 int smb_hdr_len;
1615 int resp_buf_type = 0; 1865 int resp_buf_type = 0;
1866 __u32 pid = io_parms->pid;
1867 __u16 netfid = io_parms->netfid;
1868 __u64 offset = io_parms->offset;
1869 struct cifs_tcon *tcon = io_parms->tcon;
1870 unsigned int count = io_parms->length;
1616 1871
1617 *nbytes = 0; 1872 *nbytes = 0;
1618 1873
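
cifs_writev_callback() above reassembles the server's byte count from the split Count/CountHigh response fields and masks off the high word when a broken server (OS/2 is cited) over-reports. A small helper mirroring that arithmetic, illustrative only:

/* Illustrative helper -- mirrors the arithmetic in cifs_writev_callback().
 * Rebuilds the 32-bit byte count from the split WRITE_RSP fields and applies
 * the CountHigh workaround for buggy servers. */
static unsigned int example_written_bytes(__u16 count_high, __u16 count,
					  unsigned int requested)
{
	unsigned int written = ((unsigned int)count_high << 16) + count;

	/* mask off the bogus high word if the server claims more than asked */
	if (written > requested)
		written &= 0xFFFF;

	return written;	/* callers treat written < requested as -ENOSPC */
}

For example, with requested = 0x3000 a buggy reply of Count = 0x3000, CountHigh = 1 first yields 0x13000, which the mask reduces to the expected 0x3000; an honest short write of Count = 0x2000, CountHigh = 0 stays at 0x2000 and the callback reports -ENOSPC.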
@@ -1630,6 +1885,10 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
1630 rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB); 1885 rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB);
1631 if (rc) 1886 if (rc)
1632 return rc; 1887 return rc;
1888
1889 pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
1890 pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
1891
1633 /* tcon and ses pointer are checked in smb_init */ 1892 /* tcon and ses pointer are checked in smb_init */
1634 if (tcon->ses->server == NULL) 1893 if (tcon->ses->server == NULL)
1635 return -ECONNABORTED; 1894 return -ECONNABORTED;
@@ -1705,7 +1964,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
1705 1964
1706 1965
1707int 1966int
1708CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, 1967CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
1709 const __u16 smb_file_id, const __u64 len, 1968 const __u16 smb_file_id, const __u64 len,
1710 const __u64 offset, const __u32 numUnlock, 1969 const __u64 offset, const __u32 numUnlock,
1711 const __u32 numLock, const __u8 lockType, 1970 const __u32 numLock, const __u8 lockType,
@@ -1775,7 +2034,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
1775} 2034}
1776 2035
1777int 2036int
1778CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, 2037CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
1779 const __u16 smb_file_id, const int get_flag, const __u64 len, 2038 const __u16 smb_file_id, const int get_flag, const __u64 len,
1780 struct file_lock *pLockData, const __u16 lock_type, 2039 struct file_lock *pLockData, const __u16 lock_type,
1781 const bool waitFlag) 2040 const bool waitFlag)
@@ -1913,7 +2172,7 @@ plk_err_exit:
1913 2172
1914 2173
1915int 2174int
1916CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id) 2175CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
1917{ 2176{
1918 int rc = 0; 2177 int rc = 0;
1919 CLOSE_REQ *pSMB = NULL; 2178 CLOSE_REQ *pSMB = NULL;
@@ -1946,7 +2205,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
1946} 2205}
1947 2206
1948int 2207int
1949CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id) 2208CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
1950{ 2209{
1951 int rc = 0; 2210 int rc = 0;
1952 FLUSH_REQ *pSMB = NULL; 2211 FLUSH_REQ *pSMB = NULL;
@@ -1967,7 +2226,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
1967} 2226}
1968 2227
1969int 2228int
1970CIFSSMBRename(const int xid, struct cifsTconInfo *tcon, 2229CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
1971 const char *fromName, const char *toName, 2230 const char *fromName, const char *toName,
1972 const struct nls_table *nls_codepage, int remap) 2231 const struct nls_table *nls_codepage, int remap)
1973{ 2232{
@@ -2034,7 +2293,7 @@ renameRetry:
2034 return rc; 2293 return rc;
2035} 2294}
2036 2295
2037int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon, 2296int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
2038 int netfid, const char *target_name, 2297 int netfid, const char *target_name,
2039 const struct nls_table *nls_codepage, int remap) 2298 const struct nls_table *nls_codepage, int remap)
2040{ 2299{
@@ -2114,7 +2373,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
2114} 2373}
2115 2374
2116int 2375int
2117CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char *fromName, 2376CIFSSMBCopy(const int xid, struct cifs_tcon *tcon, const char *fromName,
2118 const __u16 target_tid, const char *toName, const int flags, 2377 const __u16 target_tid, const char *toName, const int flags,
2119 const struct nls_table *nls_codepage, int remap) 2378 const struct nls_table *nls_codepage, int remap)
2120{ 2379{
@@ -2182,7 +2441,7 @@ copyRetry:
2182} 2441}
2183 2442
2184int 2443int
2185CIFSUnixCreateSymLink(const int xid, struct cifsTconInfo *tcon, 2444CIFSUnixCreateSymLink(const int xid, struct cifs_tcon *tcon,
2186 const char *fromName, const char *toName, 2445 const char *fromName, const char *toName,
2187 const struct nls_table *nls_codepage) 2446 const struct nls_table *nls_codepage)
2188{ 2447{
@@ -2271,7 +2530,7 @@ createSymLinkRetry:
2271} 2530}
2272 2531
2273int 2532int
2274CIFSUnixCreateHardLink(const int xid, struct cifsTconInfo *tcon, 2533CIFSUnixCreateHardLink(const int xid, struct cifs_tcon *tcon,
2275 const char *fromName, const char *toName, 2534 const char *fromName, const char *toName,
2276 const struct nls_table *nls_codepage, int remap) 2535 const struct nls_table *nls_codepage, int remap)
2277{ 2536{
@@ -2356,7 +2615,7 @@ createHardLinkRetry:
2356} 2615}
2357 2616
2358int 2617int
2359CIFSCreateHardLink(const int xid, struct cifsTconInfo *tcon, 2618CIFSCreateHardLink(const int xid, struct cifs_tcon *tcon,
2360 const char *fromName, const char *toName, 2619 const char *fromName, const char *toName,
2361 const struct nls_table *nls_codepage, int remap) 2620 const struct nls_table *nls_codepage, int remap)
2362{ 2621{
@@ -2428,7 +2687,7 @@ winCreateHardLinkRetry:
2428} 2687}
2429 2688
2430int 2689int
2431CIFSSMBUnixQuerySymLink(const int xid, struct cifsTconInfo *tcon, 2690CIFSSMBUnixQuerySymLink(const int xid, struct cifs_tcon *tcon,
2432 const unsigned char *searchName, char **symlinkinfo, 2691 const unsigned char *searchName, char **symlinkinfo,
2433 const struct nls_table *nls_codepage) 2692 const struct nls_table *nls_codepage)
2434{ 2693{
@@ -2533,7 +2792,7 @@ querySymLinkRetry:
2533 * it is not compiled in by default until callers fixed up and more tested. 2792 * it is not compiled in by default until callers fixed up and more tested.
2534 */ 2793 */
2535int 2794int
2536CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon, 2795CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon,
2537 const unsigned char *searchName, 2796 const unsigned char *searchName,
2538 char *symlinkinfo, const int buflen, __u16 fid, 2797 char *symlinkinfo, const int buflen, __u16 fid,
2539 const struct nls_table *nls_codepage) 2798 const struct nls_table *nls_codepage)
@@ -2771,7 +3030,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
2771} 3030}
2772 3031
2773int 3032int
2774CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon, 3033CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
2775 const unsigned char *searchName, 3034 const unsigned char *searchName,
2776 char *acl_inf, const int buflen, const int acl_type, 3035 char *acl_inf, const int buflen, const int acl_type,
2777 const struct nls_table *nls_codepage, int remap) 3036 const struct nls_table *nls_codepage, int remap)
@@ -2859,7 +3118,7 @@ queryAclRetry:
2859} 3118}
2860 3119
2861int 3120int
2862CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon, 3121CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
2863 const unsigned char *fileName, 3122 const unsigned char *fileName,
2864 const char *local_acl, const int buflen, 3123 const char *local_acl, const int buflen,
2865 const int acl_type, 3124 const int acl_type,
@@ -2939,7 +3198,7 @@ setACLerrorExit:
2939 3198
2940/* BB fix tabs in this function FIXME BB */ 3199/* BB fix tabs in this function FIXME BB */
2941int 3200int
2942CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon, 3201CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
2943 const int netfid, __u64 *pExtAttrBits, __u64 *pMask) 3202 const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
2944{ 3203{
2945 int rc = 0; 3204 int rc = 0;
@@ -3032,7 +3291,7 @@ GetExtAttrOut:
3032 */ 3291 */
3033static int 3292static int
3034smb_init_nttransact(const __u16 sub_command, const int setup_count, 3293smb_init_nttransact(const __u16 sub_command, const int setup_count,
3035 const int parm_len, struct cifsTconInfo *tcon, 3294 const int parm_len, struct cifs_tcon *tcon,
3036 void **ret_buf) 3295 void **ret_buf)
3037{ 3296{
3038 int rc; 3297 int rc;
@@ -3115,7 +3374,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
3115 3374
3116/* Get Security Descriptor (by handle) from remote server for a file or dir */ 3375/* Get Security Descriptor (by handle) from remote server for a file or dir */
3117int 3376int
3118CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, 3377CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
3119 struct cifs_ntsd **acl_inf, __u32 *pbuflen) 3378 struct cifs_ntsd **acl_inf, __u32 *pbuflen)
3120{ 3379{
3121 int rc = 0; 3380 int rc = 0;
@@ -3207,7 +3466,7 @@ qsec_out:
3207} 3466}
3208 3467
3209int 3468int
3210CIFSSMBSetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, 3469CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
3211 struct cifs_ntsd *pntsd, __u32 acllen) 3470 struct cifs_ntsd *pntsd, __u32 acllen)
3212{ 3471{
3213 __u16 byte_count, param_count, data_count, param_offset, data_offset; 3472 __u16 byte_count, param_count, data_count, param_offset, data_offset;
@@ -3273,7 +3532,7 @@ setCifsAclRetry:
3273 3532
3274/* Legacy Query Path Information call for lookup to old servers such 3533/* Legacy Query Path Information call for lookup to old servers such
3275 as Win9x/WinME */ 3534 as Win9x/WinME */
3276int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon, 3535int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
3277 const unsigned char *searchName, 3536 const unsigned char *searchName,
3278 FILE_ALL_INFO *pFinfo, 3537 FILE_ALL_INFO *pFinfo,
3279 const struct nls_table *nls_codepage, int remap) 3538 const struct nls_table *nls_codepage, int remap)
@@ -3341,7 +3600,7 @@ QInfRetry:
3341} 3600}
3342 3601
3343int 3602int
3344CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon, 3603CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
3345 u16 netfid, FILE_ALL_INFO *pFindData) 3604 u16 netfid, FILE_ALL_INFO *pFindData)
3346{ 3605{
3347 struct smb_t2_qfi_req *pSMB = NULL; 3606 struct smb_t2_qfi_req *pSMB = NULL;
@@ -3408,7 +3667,7 @@ QFileInfoRetry:
3408} 3667}
3409 3668
3410int 3669int
3411CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, 3670CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
3412 const unsigned char *searchName, 3671 const unsigned char *searchName,
3413 FILE_ALL_INFO *pFindData, 3672 FILE_ALL_INFO *pFindData,
3414 int legacy /* old style infolevel */, 3673 int legacy /* old style infolevel */,
@@ -3509,7 +3768,7 @@ QPathInfoRetry:
3509} 3768}
3510 3769
3511int 3770int
3512CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon, 3771CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
3513 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData) 3772 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
3514{ 3773{
3515 struct smb_t2_qfi_req *pSMB = NULL; 3774 struct smb_t2_qfi_req *pSMB = NULL;
@@ -3578,7 +3837,7 @@ UnixQFileInfoRetry:
3578} 3837}
3579 3838
3580int 3839int
3581CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon, 3840CIFSSMBUnixQPathInfo(const int xid, struct cifs_tcon *tcon,
3582 const unsigned char *searchName, 3841 const unsigned char *searchName,
3583 FILE_UNIX_BASIC_INFO *pFindData, 3842 FILE_UNIX_BASIC_INFO *pFindData,
3584 const struct nls_table *nls_codepage, int remap) 3843 const struct nls_table *nls_codepage, int remap)
@@ -3664,7 +3923,7 @@ UnixQPathInfoRetry:
3664 3923
3665/* xid, tcon, searchName and codepage are input parms, rest are returned */ 3924/* xid, tcon, searchName and codepage are input parms, rest are returned */
3666int 3925int
3667CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, 3926CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
3668 const char *searchName, 3927 const char *searchName,
3669 const struct nls_table *nls_codepage, 3928 const struct nls_table *nls_codepage,
3670 __u16 *pnetfid, 3929 __u16 *pnetfid,
@@ -3812,7 +4071,7 @@ findFirstRetry:
3812 return rc; 4071 return rc;
3813} 4072}
3814 4073
3815int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, 4074int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
3816 __u16 searchHandle, struct cifs_search_info *psrch_inf) 4075 __u16 searchHandle, struct cifs_search_info *psrch_inf)
3817{ 4076{
3818 TRANSACTION2_FNEXT_REQ *pSMB = NULL; 4077 TRANSACTION2_FNEXT_REQ *pSMB = NULL;
@@ -3950,7 +4209,7 @@ FNext2_err_exit:
3950} 4209}
3951 4210
3952int 4211int
3953CIFSFindClose(const int xid, struct cifsTconInfo *tcon, 4212CIFSFindClose(const int xid, struct cifs_tcon *tcon,
3954 const __u16 searchHandle) 4213 const __u16 searchHandle)
3955{ 4214{
3956 int rc = 0; 4215 int rc = 0;
@@ -3982,7 +4241,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
3982} 4241}
3983 4242
3984int 4243int
3985CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon, 4244CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
3986 const unsigned char *searchName, 4245 const unsigned char *searchName,
3987 __u64 *inode_number, 4246 __u64 *inode_number,
3988 const struct nls_table *nls_codepage, int remap) 4247 const struct nls_table *nls_codepage, int remap)
@@ -4184,7 +4443,7 @@ parse_DFS_referrals_exit:
4184} 4443}
4185 4444
4186int 4445int
4187CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses, 4446CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
4188 const unsigned char *searchName, 4447 const unsigned char *searchName,
4189 struct dfs_info3_param **target_nodes, 4448 struct dfs_info3_param **target_nodes,
4190 unsigned int *num_of_nodes, 4449 unsigned int *num_of_nodes,
@@ -4233,7 +4492,7 @@ getDFSRetry:
4233 } 4492 }
4234 4493
4235 if (ses->server) { 4494 if (ses->server) {
4236 if (ses->server->secMode & 4495 if (ses->server->sec_mode &
4237 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 4496 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
4238 pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 4497 pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
4239 } 4498 }
@@ -4298,7 +4557,7 @@ GetDFSRefExit:
4298 4557
4299/* Query File System Info such as free space to old servers such as Win 9x */ 4558/* Query File System Info such as free space to old servers such as Win 9x */
4300int 4559int
4301SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData) 4560SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
4302{ 4561{
4303/* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */ 4562/* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */
4304 TRANSACTION2_QFSI_REQ *pSMB = NULL; 4563 TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4377,7 +4636,7 @@ oldQFSInfoRetry:
4377} 4636}
4378 4637
4379int 4638int
4380CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData) 4639CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
4381{ 4640{
4382/* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */ 4641/* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */
4383 TRANSACTION2_QFSI_REQ *pSMB = NULL; 4642 TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4456,7 +4715,7 @@ QFSInfoRetry:
4456} 4715}
4457 4716
4458int 4717int
4459CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon) 4718CIFSSMBQFSAttributeInfo(const int xid, struct cifs_tcon *tcon)
4460{ 4719{
4461/* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */ 4720/* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */
4462 TRANSACTION2_QFSI_REQ *pSMB = NULL; 4721 TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4526,7 +4785,7 @@ QFSAttributeRetry:
4526} 4785}
4527 4786
4528int 4787int
4529CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon) 4788CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon)
4530{ 4789{
4531/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */ 4790/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
4532 TRANSACTION2_QFSI_REQ *pSMB = NULL; 4791 TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4597,7 +4856,7 @@ QFSDeviceRetry:
4597} 4856}
4598 4857
4599int 4858int
4600CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon) 4859CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon)
4601{ 4860{
4602/* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */ 4861/* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */
4603 TRANSACTION2_QFSI_REQ *pSMB = NULL; 4862 TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4667,7 +4926,7 @@ QFSUnixRetry:
4667} 4926}
4668 4927
4669int 4928int
4670CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap) 4929CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon, __u64 cap)
4671{ 4930{
4672/* level 0x200 SMB_SET_CIFS_UNIX_INFO */ 4931/* level 0x200 SMB_SET_CIFS_UNIX_INFO */
4673 TRANSACTION2_SETFSI_REQ *pSMB = NULL; 4932 TRANSACTION2_SETFSI_REQ *pSMB = NULL;
@@ -4741,7 +5000,7 @@ SETFSUnixRetry:
4741 5000
4742 5001
4743int 5002int
4744CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon, 5003CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
4745 struct kstatfs *FSData) 5004 struct kstatfs *FSData)
4746{ 5005{
4747/* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */ 5006/* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */
@@ -4834,7 +5093,7 @@ QFSPosixRetry:
4834 in Samba which this routine can run into */ 5093 in Samba which this routine can run into */
4835 5094
4836int 5095int
4837CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName, 5096CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon, const char *fileName,
4838 __u64 size, bool SetAllocation, 5097 __u64 size, bool SetAllocation,
4839 const struct nls_table *nls_codepage, int remap) 5098 const struct nls_table *nls_codepage, int remap)
4840{ 5099{
@@ -4923,7 +5182,7 @@ SetEOFRetry:
4923} 5182}
4924 5183
4925int 5184int
4926CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, 5185CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size,
4927 __u16 fid, __u32 pid_of_opener, bool SetAllocation) 5186 __u16 fid, __u32 pid_of_opener, bool SetAllocation)
4928{ 5187{
4929 struct smb_com_transaction2_sfi_req *pSMB = NULL; 5188 struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5005,7 +5264,7 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
5005 time and resort to the original setpathinfo level which takes the ancient 5264 time and resort to the original setpathinfo level which takes the ancient
5006 DOS time format with 2 second granularity */ 5265 DOS time format with 2 second granularity */
5007int 5266int
5008CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon, 5267CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
5009 const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener) 5268 const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener)
5010{ 5269{
5011 struct smb_com_transaction2_sfi_req *pSMB = NULL; 5270 struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5067,7 +5326,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
5067} 5326}
5068 5327
5069int 5328int
5070CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon, 5329CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
5071 bool delete_file, __u16 fid, __u32 pid_of_opener) 5330 bool delete_file, __u16 fid, __u32 pid_of_opener)
5072{ 5331{
5073 struct smb_com_transaction2_sfi_req *pSMB = NULL; 5332 struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5123,7 +5382,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
5123} 5382}
5124 5383
5125int 5384int
5126CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon, 5385CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
5127 const char *fileName, const FILE_BASIC_INFO *data, 5386 const char *fileName, const FILE_BASIC_INFO *data,
5128 const struct nls_table *nls_codepage, int remap) 5387 const struct nls_table *nls_codepage, int remap)
5129{ 5388{
@@ -5207,7 +5466,7 @@ SetTimesRetry:
5207 handling it anyway and NT4 was what we thought it would be needed for 5466 handling it anyway and NT4 was what we thought it would be needed for
5208 Do not delete it until we prove whether needed for Win9x though */ 5467 Do not delete it until we prove whether needed for Win9x though */
5209int 5468int
5210CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, char *fileName, 5469CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon, char *fileName,
5211 __u16 dos_attrs, const struct nls_table *nls_codepage) 5470 __u16 dos_attrs, const struct nls_table *nls_codepage)
5212{ 5471{
5213 SETATTR_REQ *pSMB = NULL; 5472 SETATTR_REQ *pSMB = NULL;
@@ -5295,7 +5554,7 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
5295} 5554}
5296 5555
5297int 5556int
5298CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon, 5557CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
5299 const struct cifs_unix_set_info_args *args, 5558 const struct cifs_unix_set_info_args *args,
5300 u16 fid, u32 pid_of_opener) 5559 u16 fid, u32 pid_of_opener)
5301{ 5560{
@@ -5358,7 +5617,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
5358} 5617}
5359 5618
5360int 5619int
5361CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName, 5620CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *tcon, char *fileName,
5362 const struct cifs_unix_set_info_args *args, 5621 const struct cifs_unix_set_info_args *args,
5363 const struct nls_table *nls_codepage, int remap) 5622 const struct nls_table *nls_codepage, int remap)
5364{ 5623{
@@ -5445,7 +5704,7 @@ setPermsRetry:
5445 * the data isn't copied to it, but the length is returned. 5704 * the data isn't copied to it, but the length is returned.
5446 */ 5705 */
5447ssize_t 5706ssize_t
5448CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon, 5707CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
5449 const unsigned char *searchName, const unsigned char *ea_name, 5708 const unsigned char *searchName, const unsigned char *ea_name,
5450 char *EAData, size_t buf_size, 5709 char *EAData, size_t buf_size,
5451 const struct nls_table *nls_codepage, int remap) 5710 const struct nls_table *nls_codepage, int remap)
@@ -5626,7 +5885,7 @@ QAllEAsOut:
5626} 5885}
5627 5886
5628int 5887int
5629CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName, 5888CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, const char *fileName,
5630 const char *ea_name, const void *ea_value, 5889 const char *ea_name, const void *ea_value,
5631 const __u16 ea_value_len, const struct nls_table *nls_codepage, 5890 const __u16 ea_value_len, const struct nls_table *nls_codepage,
5632 int remap) 5891 int remap)
@@ -5753,7 +6012,7 @@ SetEARetry:
5753 * incompatible for network fs clients, we could instead simply 6012 * incompatible for network fs clients, we could instead simply
5754 * expose this config flag by adding a future cifs (and smb2) notify ioctl. 6013 * expose this config flag by adding a future cifs (and smb2) notify ioctl.
5755 */ 6014 */
5756int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, 6015int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
5757 const int notify_subdirs, const __u16 netfid, 6016 const int notify_subdirs, const __u16 netfid,
5758 __u32 filter, struct file *pfile, int multishot, 6017 __u32 filter, struct file *pfile, int multishot,
5759 const struct nls_table *nls_codepage) 6018 const struct nls_table *nls_codepage)
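The cifssmb.c portion above is a mechanical rename: struct cifsTconInfo becomes struct cifs_tcon (and, in the hunks that follow, struct cifsSesInfo becomes struct cifs_ses) in every SMB call wrapper, with the function bodies otherwise untouched. A minimal caller-side sketch, assuming the tcon_list linkage that the connect.c hunks below show, just to illustrate the renamed types in use; it is not part of the patch:

    /* Sketch only: walk a session's tree connections using the new names.
     * A real caller would hold cifs_tcp_ses_lock around the list walk. */
    static int query_unix_info_all(int xid, struct cifs_ses *ses)
    {
            struct cifs_tcon *tcon;
            int rc = 0;

            list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
                    rc = CIFSSMBQFSUnixInfo(xid, tcon);
                    if (rc)
                            break;
            }
            return rc;
    }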
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index da284e3cb653..6d88b82537c3 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -57,62 +57,6 @@
57 57
58extern mempool_t *cifs_req_poolp; 58extern mempool_t *cifs_req_poolp;
59 59
60struct smb_vol {
61 char *username;
62 char *password;
63 char *domainname;
64 char *UNC;
65 char *UNCip;
66 char *iocharset; /* local code page for mapping to and from Unicode */
67 char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
68 char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
69 uid_t cred_uid;
70 uid_t linux_uid;
71 gid_t linux_gid;
72 mode_t file_mode;
73 mode_t dir_mode;
74 unsigned secFlg;
75 bool retry:1;
76 bool intr:1;
77 bool setuids:1;
78 bool override_uid:1;
79 bool override_gid:1;
80 bool dynperm:1;
81 bool noperm:1;
82 bool no_psx_acl:1; /* set if posix acl support should be disabled */
83 bool cifs_acl:1;
84 bool no_xattr:1; /* set if xattr (EA) support should be disabled*/
85 bool server_ino:1; /* use inode numbers from server ie UniqueId */
86 bool direct_io:1;
87 bool strict_io:1; /* strict cache behavior */
88 bool remap:1; /* set to remap seven reserved chars in filenames */
89 bool posix_paths:1; /* unset to not ask for posix pathnames. */
90 bool no_linux_ext:1;
91 bool sfu_emul:1;
92 bool nullauth:1; /* attempt to authenticate with null user */
93 bool nocase:1; /* request case insensitive filenames */
94 bool nobrl:1; /* disable sending byte range locks to srv */
95 bool mand_lock:1; /* send mandatory not posix byte range lock reqs */
96 bool seal:1; /* request transport encryption on share */
97 bool nodfs:1; /* Do not request DFS, even if available */
98 bool local_lease:1; /* check leases only on local system, not remote */
99 bool noblocksnd:1;
100 bool noautotune:1;
101 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
102 bool fsc:1; /* enable fscache */
103 bool mfsymlinks:1; /* use Minshall+French Symlinks */
104 bool multiuser:1;
105 bool use_smb2:1; /* force smb2 use on mount instead of cifs */
106 unsigned int rsize;
107 unsigned int wsize;
108 bool sockopt_tcp_nodelay:1;
109 unsigned short int port;
110 unsigned long actimeo; /* attribute cache timeout (jiffies) */
111 char *prepath;
112 struct sockaddr_storage srcaddr; /* allow binding to a local IP */
113 struct nls_table *local_nls;
114};
115
116/* FIXME: should these be tunable? */ 60/* FIXME: should these be tunable? */
117#define TLINK_ERROR_EXPIRE (1 * HZ) 61#define TLINK_ERROR_EXPIRE (1 * HZ)
118#define TLINK_IDLE_EXPIRE (600 * HZ) 62#define TLINK_IDLE_EXPIRE (600 * HZ)
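The struct smb_vol definition is removed from connect.c here rather than dropped; it is presumably relocated to a shared header (the destination file is not in this excerpt) so the superblock-matching and volume-setup code added later in the patch can see the parsed mount options from outside connect.c. A hedged sketch of the declarations such a header would need to carry, matching the signatures visible further down:

    /* Sketch of the implied shared declarations; the actual header is not
     * shown in this excerpt. */
    struct smb_vol;                 /* full definition moves out of connect.c */
    struct cifs_sb_info;
    struct super_block;

    int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
                               const char *devname);
    void cifs_cleanup_volume_info(struct smb_vol **pvolume_info);
    void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
                            struct cifs_sb_info *cifs_sb);
    int cifs_match_super(struct super_block *sb, void *data);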
@@ -135,9 +79,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
135{ 79{
136 int rc = 0; 80 int rc = 0;
137 struct list_head *tmp, *tmp2; 81 struct list_head *tmp, *tmp2;
138 struct cifsSesInfo *ses; 82 struct cifs_ses *ses;
139 struct cifsTconInfo *tcon; 83 struct cifs_tcon *tcon;
140 struct mid_q_entry *mid_entry; 84 struct mid_q_entry *mid_entry;
85 struct list_head retry_list;
141 86
142 spin_lock(&GlobalMid_Lock); 87 spin_lock(&GlobalMid_Lock);
143 if (server->tcpStatus == CifsExiting) { 88 if (server->tcpStatus == CifsExiting) {
@@ -157,11 +102,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
157 cFYI(1, "%s: marking sessions and tcons for reconnect", __func__); 102 cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
158 spin_lock(&cifs_tcp_ses_lock); 103 spin_lock(&cifs_tcp_ses_lock);
159 list_for_each(tmp, &server->smb_ses_list) { 104 list_for_each(tmp, &server->smb_ses_list) {
160 ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); 105 ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
161 ses->need_reconnect = true; 106 ses->need_reconnect = true;
162 ses->ipc_tid = 0; 107 ses->ipc_tid = 0;
163 list_for_each(tmp2, &ses->tcon_list) { 108 list_for_each(tmp2, &ses->tcon_list) {
164 tcon = list_entry(tmp2, struct cifsTconInfo, tcon_list); 109 tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
165 tcon->need_reconnect = true; 110 tcon->need_reconnect = true;
166 } 111 }
167 } 112 }
@@ -189,16 +134,23 @@ cifs_reconnect(struct TCP_Server_Info *server)
189 mutex_unlock(&server->srv_mutex); 134 mutex_unlock(&server->srv_mutex);
190 135
191 /* mark submitted MIDs for retry and issue callback */ 136 /* mark submitted MIDs for retry and issue callback */
192 cFYI(1, "%s: issuing mid callbacks", __func__); 137 INIT_LIST_HEAD(&retry_list);
138 cFYI(1, "%s: moving mids to private list", __func__);
193 spin_lock(&GlobalMid_Lock); 139 spin_lock(&GlobalMid_Lock);
194 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { 140 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
195 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 141 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
196 if (mid_entry->midState == MID_REQUEST_SUBMITTED) 142 if (mid_entry->midState == MID_REQUEST_SUBMITTED)
197 mid_entry->midState = MID_RETRY_NEEDED; 143 mid_entry->midState = MID_RETRY_NEEDED;
144 list_move(&mid_entry->qhead, &retry_list);
145 }
146 spin_unlock(&GlobalMid_Lock);
147
148 cFYI(1, "%s: issuing mid callbacks", __func__);
149 list_for_each_safe(tmp, tmp2, &retry_list) {
150 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
198 list_del_init(&mid_entry->qhead); 151 list_del_init(&mid_entry->qhead);
199 mid_entry->callback(mid_entry); 152 mid_entry->callback(mid_entry);
200 } 153 }
201 spin_unlock(&GlobalMid_Lock);
202 154
203 while (server->tcpStatus == CifsNeedReconnect) { 155 while (server->tcpStatus == CifsNeedReconnect) {
204 try_to_freeze(); 156 try_to_freeze();
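The behavioural change in this hunk is that MID callbacks no longer run while GlobalMid_Lock is held: submitted MIDs are first marked MID_RETRY_NEEDED and moved onto a private retry_list under the spinlock, and only then, lock-free, does a second loop invoke each callback. That keeps callbacks that may sleep or take the lock again out of the critical section; the same two-phase idiom appears once more in the demultiplex-thread teardown hunk further down. A generic sketch of the pattern, not CIFS code:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct work_item {
            struct list_head list;
            /* payload elided */
    };

    /* Two-phase drain: detach everything under the lock, complete outside it. */
    static void drain_and_complete(spinlock_t *lock, struct list_head *pending,
                                   void (*complete)(struct work_item *))
    {
            LIST_HEAD(local);
            struct work_item *item, *tmp;

            spin_lock(lock);
            list_splice_init(pending, &local);      /* steal the whole queue */
            spin_unlock(lock);

            list_for_each_entry_safe(item, tmp, &local, list) {
                    list_del_init(&item->list);
                    complete(item);                 /* may sleep or relock */
            }
    }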
@@ -672,12 +624,12 @@ multi_t2_fnd:
672 mid_entry->when_received = jiffies; 624 mid_entry->when_received = jiffies;
673#endif 625#endif
674 list_del_init(&mid_entry->qhead); 626 list_del_init(&mid_entry->qhead);
675 mid_entry->callback(mid_entry);
676 break; 627 break;
677 } 628 }
678 spin_unlock(&GlobalMid_Lock); 629 spin_unlock(&GlobalMid_Lock);
679 630
680 if (mid_entry != NULL) { 631 if (mid_entry != NULL) {
632 mid_entry->callback(mid_entry);
681 /* Was previous buf put in mpx struct for multi-rsp? */ 633 /* Was previous buf put in mpx struct for multi-rsp? */
682 if (!isMultiRsp) { 634 if (!isMultiRsp) {
683 /* smb buffer will be freed by user thread */ 635 /* smb buffer will be freed by user thread */
@@ -741,15 +693,25 @@ multi_t2_fnd:
741 cifs_small_buf_release(smallbuf); 693 cifs_small_buf_release(smallbuf);
742 694
743 if (!list_empty(&server->pending_mid_q)) { 695 if (!list_empty(&server->pending_mid_q)) {
696 struct list_head dispose_list;
697
698 INIT_LIST_HEAD(&dispose_list);
744 spin_lock(&GlobalMid_Lock); 699 spin_lock(&GlobalMid_Lock);
745 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { 700 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
746 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 701 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
747 cFYI(1, "Clearing Mid 0x%x - issuing callback", 702 cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
748 mid_entry->mid); 703 mid_entry->midState = MID_SHUTDOWN;
704 list_move(&mid_entry->qhead, &dispose_list);
705 }
706 spin_unlock(&GlobalMid_Lock);
707
708 /* now walk dispose list and issue callbacks */
709 list_for_each_safe(tmp, tmp2, &dispose_list) {
710 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
711 cFYI(1, "Callback mid 0x%x", mid_entry->mid);
749 list_del_init(&mid_entry->qhead); 712 list_del_init(&mid_entry->qhead);
750 mid_entry->callback(mid_entry); 713 mid_entry->callback(mid_entry);
751 } 714 }
752 spin_unlock(&GlobalMid_Lock);
753 /* 1/8th of sec is more than enough time for them to exit */ 715 /* 1/8th of sec is more than enough time for them to exit */
754 msleep(125); 716 msleep(125);
755 } 717 }
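The thread-exit path gets the same two-phase treatment, with one addition: each remaining MID is flagged MID_SHUTDOWN before its callback runs, so waiters can tell "the receive thread is going away" apart from "retry after reconnect". A hypothetical waiter-side check showing one plausible mapping of the states to errno values; the exact mapping is an assumption, not taken from this excerpt:

    /* Hypothetical completion check; error choices are illustrative only. */
    static int mid_state_to_errno(const struct mid_q_entry *mid)
    {
            switch (mid->midState) {
            case MID_RESPONSE_RECEIVED:
                    return 0;
            case MID_RETRY_NEEDED:
                    return -EAGAIN;         /* reconnect pending, caller may retry */
            case MID_SHUTDOWN:
                    return -EHOSTDOWN;      /* demultiplex thread is exiting */
            default:
                    return -EIO;
            }
    }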
@@ -1062,13 +1024,6 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1062 (strnicmp(value, "1", 1) == 0)) { 1024 (strnicmp(value, "1", 1) == 0)) {
1063 /* this is the default */ 1025 /* this is the default */
1064 continue; 1026 continue;
1065 } else if ((strnicmp(value, "smb2", 4) == 0) ||
1066 (strnicmp(value, "2", 1) == 0)) {
1067#ifdef CONFIG_CIFS_SMB2
1068 vol->use_smb2 = true;
1069#else
1070 cERROR(1, "smb2 support not enabled");
1071#endif /* CONFIG_CIFS_SMB2 */
1072 } 1027 }
1073 } else if ((strnicmp(data, "unc", 3) == 0) 1028 } else if ((strnicmp(data, "unc", 3) == 0)
1074 || (strnicmp(data, "target", 6) == 0) 1029 || (strnicmp(data, "target", 6) == 0)
@@ -1404,6 +1359,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1404 vol->server_ino = 1; 1359 vol->server_ino = 1;
1405 } else if (strnicmp(data, "noserverino", 9) == 0) { 1360 } else if (strnicmp(data, "noserverino", 9) == 0) {
1406 vol->server_ino = 0; 1361 vol->server_ino = 0;
1362 } else if (strnicmp(data, "rwpidforward", 4) == 0) {
1363 vol->rwpidforward = 1;
1407 } else if (strnicmp(data, "cifsacl", 7) == 0) { 1364 } else if (strnicmp(data, "cifsacl", 7) == 0) {
1408 vol->cifs_acl = 1; 1365 vol->cifs_acl = 1;
1409 } else if (strnicmp(data, "nocifsacl", 9) == 0) { 1366 } else if (strnicmp(data, "nocifsacl", 9) == 0) {
@@ -1640,16 +1597,35 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
1640 1597
1641 /* now check if signing mode is acceptable */ 1598 /* now check if signing mode is acceptable */
1642 if ((secFlags & CIFSSEC_MAY_SIGN) == 0 && 1599 if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
1643 (server->secMode & SECMODE_SIGN_REQUIRED)) 1600 (server->sec_mode & SECMODE_SIGN_REQUIRED))
1644 return false; 1601 return false;
1645 else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) && 1602 else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) &&
1646 (server->secMode & 1603 (server->sec_mode &
1647 (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0) 1604 (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0)
1648 return false; 1605 return false;
1649 1606
1650 return true; 1607 return true;
1651} 1608}
1652 1609
1610static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr,
1611 struct smb_vol *vol)
1612{
1613 if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
1614 return 0;
1615
1616 if (!match_address(server, addr,
1617 (struct sockaddr *)&vol->srcaddr))
1618 return 0;
1619
1620 if (!match_port(server, addr))
1621 return 0;
1622
1623 if (!match_security(server, vol))
1624 return 0;
1625
1626 return 1;
1627}
1628
1653static struct TCP_Server_Info * 1629static struct TCP_Server_Info *
1654cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol) 1630cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
1655{ 1631{
@@ -1657,17 +1633,7 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
1657 1633
1658 spin_lock(&cifs_tcp_ses_lock); 1634 spin_lock(&cifs_tcp_ses_lock);
1659 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { 1635 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
1660 if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) 1636 if (!match_server(server, addr, vol))
1661 continue;
1662
1663 if (!match_address(server, addr,
1664 (struct sockaddr *)&vol->srcaddr))
1665 continue;
1666
1667 if (!match_port(server, addr))
1668 continue;
1669
1670 if (!match_security(server, vol))
1671 continue; 1637 continue;
1672 1638
1673 ++server->srv_count; 1639 ++server->srv_count;
@@ -1861,32 +1827,39 @@ out_err:
1861 return ERR_PTR(rc); 1827 return ERR_PTR(rc);
1862} 1828}
1863 1829
1864static struct cifsSesInfo * 1830static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
1831{
1832 switch (ses->server->secType) {
1833 case Kerberos:
1834 if (vol->cred_uid != ses->cred_uid)
1835 return 0;
1836 break;
1837 default:
1838 /* anything else takes username/password */
1839 if (ses->user_name == NULL)
1840 return 0;
1841 if (strncmp(ses->user_name, vol->username,
1842 MAX_USERNAME_SIZE))
1843 return 0;
1844 if (strlen(vol->username) != 0 &&
1845 ses->password != NULL &&
1846 strncmp(ses->password,
1847 vol->password ? vol->password : "",
1848 MAX_PASSWORD_SIZE))
1849 return 0;
1850 }
1851 return 1;
1852}
1853
1854static struct cifs_ses *
1865cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) 1855cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
1866{ 1856{
1867 struct cifsSesInfo *ses; 1857 struct cifs_ses *ses;
1868 1858
1869 spin_lock(&cifs_tcp_ses_lock); 1859 spin_lock(&cifs_tcp_ses_lock);
1870 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 1860 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1871 switch (server->secType) { 1861 if (!match_session(ses, vol))
1872 case Kerberos: 1862 continue;
1873 if (vol->cred_uid != ses->cred_uid)
1874 continue;
1875 break;
1876 default:
1877 /* anything else takes username/password */
1878 if (ses->user_name == NULL)
1879 continue;
1880 if (strncmp(ses->user_name, vol->username,
1881 MAX_USERNAME_SIZE))
1882 continue;
1883 if (strlen(vol->username) != 0 &&
1884 ses->password != NULL &&
1885 strncmp(ses->password,
1886 vol->password ? vol->password : "",
1887 MAX_PASSWORD_SIZE))
1888 continue;
1889 }
1890 ++ses->ses_count; 1863 ++ses->ses_count;
1891 spin_unlock(&cifs_tcp_ses_lock); 1864 spin_unlock(&cifs_tcp_ses_lock);
1892 return ses; 1865 return ses;
@@ -1896,7 +1869,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
1896} 1869}
1897 1870
1898static void 1871static void
1899cifs_put_smb_ses(struct cifsSesInfo *ses) 1872cifs_put_smb_ses(struct cifs_ses *ses)
1900{ 1873{
1901 int xid; 1874 int xid;
1902 struct TCP_Server_Info *server = ses->server; 1875 struct TCP_Server_Info *server = ses->server;
@@ -1922,11 +1895,11 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
1922 1895
1923static bool warned_on_ntlm; /* globals init to false automatically */ 1896static bool warned_on_ntlm; /* globals init to false automatically */
1924 1897
1925static struct cifsSesInfo * 1898static struct cifs_ses *
1926cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) 1899cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1927{ 1900{
1928 int rc = -ENOMEM, xid; 1901 int rc = -ENOMEM, xid;
1929 struct cifsSesInfo *ses; 1902 struct cifs_ses *ses;
1930 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; 1903 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
1931 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; 1904 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
1932 1905
@@ -2029,20 +2002,26 @@ get_ses_fail:
2029 return ERR_PTR(rc); 2002 return ERR_PTR(rc);
2030} 2003}
2031 2004
2032static struct cifsTconInfo * 2005static int match_tcon(struct cifs_tcon *tcon, const char *unc)
2033cifs_find_tcon(struct cifsSesInfo *ses, const char *unc) 2006{
2007 if (tcon->tidStatus == CifsExiting)
2008 return 0;
2009 if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE))
2010 return 0;
2011 return 1;
2012}
2013
2014static struct cifs_tcon *
2015cifs_find_tcon(struct cifs_ses *ses, const char *unc)
2034{ 2016{
2035 struct list_head *tmp; 2017 struct list_head *tmp;
2036 struct cifsTconInfo *tcon; 2018 struct cifs_tcon *tcon;
2037 2019
2038 spin_lock(&cifs_tcp_ses_lock); 2020 spin_lock(&cifs_tcp_ses_lock);
2039 list_for_each(tmp, &ses->tcon_list) { 2021 list_for_each(tmp, &ses->tcon_list) {
2040 tcon = list_entry(tmp, struct cifsTconInfo, tcon_list); 2022 tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
2041 if (tcon->tidStatus == CifsExiting) 2023 if (!match_tcon(tcon, unc))
2042 continue;
2043 if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE))
2044 continue; 2024 continue;
2045
2046 ++tcon->tc_count; 2025 ++tcon->tc_count;
2047 spin_unlock(&cifs_tcp_ses_lock); 2026 spin_unlock(&cifs_tcp_ses_lock);
2048 return tcon; 2027 return tcon;
@@ -2052,10 +2031,10 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
2052} 2031}
2053 2032
2054static void 2033static void
2055cifs_put_tcon(struct cifsTconInfo *tcon) 2034cifs_put_tcon(struct cifs_tcon *tcon)
2056{ 2035{
2057 int xid; 2036 int xid;
2058 struct cifsSesInfo *ses = tcon->ses; 2037 struct cifs_ses *ses = tcon->ses;
2059 2038
2060 cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count); 2039 cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
2061 spin_lock(&cifs_tcp_ses_lock); 2040 spin_lock(&cifs_tcp_ses_lock);
@@ -2076,11 +2055,11 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
2076 cifs_put_smb_ses(ses); 2055 cifs_put_smb_ses(ses);
2077} 2056}
2078 2057
2079static struct cifsTconInfo * 2058static struct cifs_tcon *
2080cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info) 2059cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
2081{ 2060{
2082 int rc, xid; 2061 int rc, xid;
2083 struct cifsTconInfo *tcon; 2062 struct cifs_tcon *tcon;
2084 2063
2085 tcon = cifs_find_tcon(ses, volume_info->UNC); 2064 tcon = cifs_find_tcon(ses, volume_info->UNC);
2086 if (tcon) { 2065 if (tcon) {
@@ -2169,8 +2148,102 @@ cifs_put_tlink(struct tcon_link *tlink)
2169 return; 2148 return;
2170} 2149}
2171 2150
2151static inline struct tcon_link *
2152cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb);
2153
2154static int
2155compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2156{
2157 struct cifs_sb_info *old = CIFS_SB(sb);
2158 struct cifs_sb_info *new = mnt_data->cifs_sb;
2159
2160 if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
2161 return 0;
2162
2163 if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
2164 (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
2165 return 0;
2166
2167 if (old->rsize != new->rsize)
2168 return 0;
2169
2170 /*
2171 * We want to share sb only if we don't specify wsize or specified wsize
2172 * is greater or equal than existing one.
2173 */
2174 if (new->wsize && new->wsize < old->wsize)
2175 return 0;
2176
2177 if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid)
2178 return 0;
2179
2180 if (old->mnt_file_mode != new->mnt_file_mode ||
2181 old->mnt_dir_mode != new->mnt_dir_mode)
2182 return 0;
2183
2184 if (strcmp(old->local_nls->charset, new->local_nls->charset))
2185 return 0;
2186
2187 if (old->actimeo != new->actimeo)
2188 return 0;
2189
2190 return 1;
2191}
2192
2172int 2193int
2173get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path, 2194cifs_match_super(struct super_block *sb, void *data)
2195{
2196 struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data;
2197 struct smb_vol *volume_info;
2198 struct cifs_sb_info *cifs_sb;
2199 struct TCP_Server_Info *tcp_srv;
2200 struct cifs_ses *ses;
2201 struct cifs_tcon *tcon;
2202 struct tcon_link *tlink;
2203 struct sockaddr_storage addr;
2204 int rc = 0;
2205
2206 memset(&addr, 0, sizeof(struct sockaddr_storage));
2207
2208 spin_lock(&cifs_tcp_ses_lock);
2209 cifs_sb = CIFS_SB(sb);
2210 tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
2211 if (IS_ERR(tlink)) {
2212 spin_unlock(&cifs_tcp_ses_lock);
2213 return rc;
2214 }
2215 tcon = tlink_tcon(tlink);
2216 ses = tcon->ses;
2217 tcp_srv = ses->server;
2218
2219 volume_info = mnt_data->vol;
2220
2221 if (!volume_info->UNCip || !volume_info->UNC)
2222 goto out;
2223
2224 rc = cifs_fill_sockaddr((struct sockaddr *)&addr,
2225 volume_info->UNCip,
2226 strlen(volume_info->UNCip),
2227 volume_info->port);
2228 if (!rc)
2229 goto out;
2230
2231 if (!match_server(tcp_srv, (struct sockaddr *)&addr, volume_info) ||
2232 !match_session(ses, volume_info) ||
2233 !match_tcon(tcon, volume_info->UNC)) {
2234 rc = 0;
2235 goto out;
2236 }
2237
2238 rc = compare_mount_options(sb, mnt_data);
2239out:
2240 cifs_put_tlink(tlink);
2241 spin_unlock(&cifs_tcp_ses_lock);
2242 return rc;
2243}
2244
2245int
2246get_dfs_path(int xid, struct cifs_ses *pSesInfo, const char *old_path,
2174 const struct nls_table *nls_codepage, unsigned int *pnum_referrals, 2247 const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
2175 struct dfs_info3_param **preferrals, int remap) 2248 struct dfs_info3_param **preferrals, int remap)
2176{ 2249{
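compare_mount_options and cifs_match_super exist so a new mount can find an already-mounted, compatible superblock and share it instead of creating a second one. cifs_match_super has the int (*test)(struct super_block *, void *) shape that the VFS sget() helper expects, with struct cifs_mnt_data (flags, parsed volume, prebuilt cifs_sb) as the opaque cookie. A sketch of how the mount entry point would plug it in; the sget() caller lives in cifsfs.c, which is not part of this excerpt, so treat the surrounding code as an assumption:

    /* Sketch of the expected caller, under stated assumptions. */
    struct cifs_mnt_data mnt_data = {
            .vol     = volume_info,         /* from cifs_setup_volume_info() */
            .cifs_sb = cifs_sb,
            .flags   = flags,
    };

    sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data);
    if (IS_ERR(sb))
            return ERR_CAST(sb);

    if (sb->s_fs_info) {
            cFYI(1, "Use existing superblock");     /* compatible sb found */
    } else {
            /* fresh superblock: attach cifs_sb and proceed to cifs_mount() */
    }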
@@ -2469,7 +2542,7 @@ ip_connect(struct TCP_Server_Info *server)
2469 return generic_ip_connect(server); 2542 return generic_ip_connect(server);
2470} 2543}
2471 2544
2472void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, 2545void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
2473 struct super_block *sb, struct smb_vol *vol_info) 2546 struct super_block *sb, struct smb_vol *vol_info)
2474{ 2547{
2475 /* if we are reconnecting then should we check to see if 2548 /* if we are reconnecting then should we check to see if
@@ -2498,7 +2571,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
2498 2571
2499 if (!CIFSSMBQFSUnixInfo(xid, tcon)) { 2572 if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
2500 __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); 2573 __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
2501 2574 cFYI(1, "unix caps which server supports %lld", cap);
2502 /* check for reconnect case in which we do not 2575 /* check for reconnect case in which we do not
2503 want to change the mount behavior if we can avoid it */ 2576 want to change the mount behavior if we can avoid it */
2504 if (vol_info == NULL) { 2577 if (vol_info == NULL) {
@@ -2516,6 +2589,9 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
2516 } 2589 }
2517 } 2590 }
2518 2591
2592 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
2593 cERROR(1, "per-share encryption not supported yet");
2594
2519 cap &= CIFS_UNIX_CAP_MASK; 2595 cap &= CIFS_UNIX_CAP_MASK;
2520 if (vol_info && vol_info->no_psx_acl) 2596 if (vol_info && vol_info->no_psx_acl)
2521 cap &= ~CIFS_UNIX_POSIX_ACL_CAP; 2597 cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
@@ -2534,12 +2610,6 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
2534 CIFS_MOUNT_POSIX_PATHS; 2610 CIFS_MOUNT_POSIX_PATHS;
2535 } 2611 }
2536 2612
2537 /* We might be setting the path sep back to a different
2538 form if we are reconnecting and the server switched its
2539 posix path capability for this share */
2540 if (sb && (CIFS_SB(sb)->prepathlen > 0))
2541 CIFS_SB(sb)->prepath[0] = CIFS_DIR_SEP(CIFS_SB(sb));
2542
2543 if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { 2613 if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
2544 if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { 2614 if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
2545 CIFS_SB(sb)->rsize = 127 * 1024; 2615 CIFS_SB(sb)->rsize = 127 * 1024;
@@ -2564,6 +2634,10 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
2564 cFYI(1, "very large read cap"); 2634 cFYI(1, "very large read cap");
2565 if (cap & CIFS_UNIX_LARGE_WRITE_CAP) 2635 if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
2566 cFYI(1, "very large write cap"); 2636 cFYI(1, "very large write cap");
2637 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
2638 cFYI(1, "transport encryption cap");
2639 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
2640 cFYI(1, "mandatory transport encryption cap");
2567#endif /* CIFS_DEBUG2 */ 2641#endif /* CIFS_DEBUG2 */
2568 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { 2642 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
2569 if (vol_info == NULL) { 2643 if (vol_info == NULL) {
@@ -2580,28 +2654,8 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
2580 } 2654 }
2581} 2655}
2582 2656
2583static void 2657void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
2584convert_delimiter(char *path, char delim) 2658 struct cifs_sb_info *cifs_sb)
2585{
2586 int i;
2587 char old_delim;
2588
2589 if (path == NULL)
2590 return;
2591
2592 if (delim == '/')
2593 old_delim = '\\';
2594 else
2595 old_delim = '/';
2596
2597 for (i = 0; path[i] != '\0'; i++) {
2598 if (path[i] == old_delim)
2599 path[i] = delim;
2600 }
2601}
2602
2603static void setup_cifs_sb(struct smb_vol *pvolume_info,
2604 struct cifs_sb_info *cifs_sb)
2605{ 2659{
2606 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); 2660 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
2607 2661
@@ -2615,40 +2669,19 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
2615 else /* default */ 2669 else /* default */
2616 cifs_sb->rsize = CIFSMaxBufSize; 2670 cifs_sb->rsize = CIFSMaxBufSize;
2617 2671
2618 if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) {
2619 cERROR(1, "wsize %d too large, using 4096 instead",
2620 pvolume_info->wsize);
2621 cifs_sb->wsize = 4096;
2622 } else if (pvolume_info->wsize)
2623 cifs_sb->wsize = pvolume_info->wsize;
2624 else
2625 cifs_sb->wsize = min_t(const int,
2626 PAGEVEC_SIZE * PAGE_CACHE_SIZE,
2627 127*1024);
2628 /* old default of CIFSMaxBufSize was too small now
2629 that SMB Write2 can send multiple pages in kvec.
2630 RFC1001 does not describe what happens when frame
2631 bigger than 128K is sent so use that as max in
2632 conjunction with 52K kvec constraint on arch with 4K
2633 page size */
2634
2635 if (cifs_sb->rsize < 2048) { 2672 if (cifs_sb->rsize < 2048) {
2636 cifs_sb->rsize = 2048; 2673 cifs_sb->rsize = 2048;
2637 /* Windows ME may prefer this */ 2674 /* Windows ME may prefer this */
2638 cFYI(1, "readsize set to minimum: 2048"); 2675 cFYI(1, "readsize set to minimum: 2048");
2639 } 2676 }
2640 /* calculate prepath */ 2677
2641 cifs_sb->prepath = pvolume_info->prepath; 2678 /*
2642 if (cifs_sb->prepath) { 2679 * Temporarily set wsize for matching superblock. If we end up using
2643 cifs_sb->prepathlen = strlen(cifs_sb->prepath); 2680 * new sb then cifs_negotiate_wsize will later negotiate it downward
2644 /* we can not convert the / to \ in the path 2681 * if needed.
2645 separators in the prefixpath yet because we do not 2682 */
2646 know (until reset_cifs_unix_caps is called later) 2683 cifs_sb->wsize = pvolume_info->wsize;
2647 whether POSIX PATH CAP is available. We normalize 2684
2648 the / to \ after reset_cifs_unix_caps is called */
2649 pvolume_info->prepath = NULL;
2650 } else
2651 cifs_sb->prepathlen = 0;
2652 cifs_sb->mnt_uid = pvolume_info->linux_uid; 2685 cifs_sb->mnt_uid = pvolume_info->linux_uid;
2653 cifs_sb->mnt_gid = pvolume_info->linux_gid; 2686 cifs_sb->mnt_gid = pvolume_info->linux_gid;
2654 cifs_sb->mnt_file_mode = pvolume_info->file_mode; 2687 cifs_sb->mnt_file_mode = pvolume_info->file_mode;
@@ -2657,6 +2690,7 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
2657 cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); 2690 cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
2658 2691
2659 cifs_sb->actimeo = pvolume_info->actimeo; 2692 cifs_sb->actimeo = pvolume_info->actimeo;
2693 cifs_sb->local_nls = pvolume_info->local_nls;
2660 2694
2661 if (pvolume_info->noperm) 2695 if (pvolume_info->noperm)
2662 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; 2696 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
@@ -2676,6 +2710,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
2676 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC; 2710 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC;
2677 if (pvolume_info->mand_lock) 2711 if (pvolume_info->mand_lock)
2678 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL; 2712 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL;
2713 if (pvolume_info->rwpidforward)
2714 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
2679 if (pvolume_info->cifs_acl) 2715 if (pvolume_info->cifs_acl)
2680 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; 2716 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
2681 if (pvolume_info->override_uid) 2717 if (pvolume_info->override_uid)
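The new rwpidforward mount option parsed earlier surfaces here as CIFS_MOUNT_RWPIDFORWARD; it is plumbing for issuing reads and writes with the PID of the process that opened the file rather than the PID of the current caller. The consuming code is not in this excerpt, so the following is only a hedged sketch, assuming the open-file structure records its opener's pid:

    /* Sketch only: choosing the pid for an I/O request. Assumes
     * open_file->pid holds the opener; not shown in this excerpt. */
    __u32 pid;

    if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
            pid = open_file->pid;           /* pid that opened the file */
    else
            pid = current->tgid;            /* pid of the current caller */

    io_parms.pid = pid;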
@@ -2709,8 +2745,55 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
2709 "mount option supported"); 2745 "mount option supported");
2710} 2746}
2711 2747
2748/*
2749 * When the server supports very large writes via POSIX extensions, we can
2750 * allow up to 2^24 - PAGE_CACHE_SIZE.
2751 *
2752 * Note that this might make for "interesting" allocation problems during
2753 * writeback however (as we have to allocate an array of pointers for the
2754 * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
2755 */
2756#define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE)
2757
2758/*
2759 * When the server doesn't allow large posix writes, default to a wsize of
2760 * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size
2761 * described in RFC1001. This allows space for the header without going over
2762 * that by default.
2763 */
2764#define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE)
2765
2766/*
2767 * The default wsize is 1M. find_get_pages seems to return a maximum of 256
2768 * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
2769 * a single wsize request with a single call.
2770 */
2771#define CIFS_DEFAULT_WSIZE (1024 * 1024)
2772
2773static unsigned int
2774cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
2775{
2776 __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
2777 struct TCP_Server_Info *server = tcon->ses->server;
2778 unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
2779 CIFS_DEFAULT_WSIZE;
2780
2781 /* can server support 24-bit write sizes? (via UNIX extensions) */
2782 if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
2783 wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE);
2784
2785 /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */
2786 if (!(server->capabilities & CAP_LARGE_WRITE_X))
2787 wsize = min_t(unsigned int, wsize, USHRT_MAX);
2788
2789 /* hard limit of CIFS_MAX_WSIZE */
2790 wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
2791
2792 return wsize;
2793}
2794
2712static int 2795static int
2713is_path_accessible(int xid, struct cifsTconInfo *tcon, 2796is_path_accessible(int xid, struct cifs_tcon *tcon,
2714 struct cifs_sb_info *cifs_sb, const char *full_path) 2797 struct cifs_sb_info *cifs_sb, const char *full_path)
2715{ 2798{
2716 int rc; 2799 int rc;
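Write-size selection moves out of setup and into cifs_negotiate_wsize, which clamps the requested size (or the 1MB default) three ways: to 128K minus one page when the server lacks the POSIX large-write capability, to USHRT_MAX without CAP_LARGE_WRITE_X, and to a hard ceiling of 2^24 minus one page. With PAGE_CACHE_SIZE of 4096 those limits come out to 1048576, 126976, 65535 and 16773120 bytes. A tiny standalone restatement of the clamping, handy for sanity-checking the arithmetic (page_size stands in for PAGE_CACHE_SIZE):

    #include <stdio.h>

    static unsigned int negotiate_wsize(unsigned int requested, int large_posix,
                                        int large_write_x, unsigned int page_size)
    {
            unsigned int wsize = requested ? requested : 1024 * 1024;

            if (!large_posix && wsize > 128 * 1024 - page_size)
                    wsize = 128 * 1024 - page_size;
            if (!large_write_x && wsize > 65535)
                    wsize = 65535;
            if (wsize > (1u << 24) - page_size)
                    wsize = (1u << 24) - page_size;
            return wsize;
    }

    int main(void)
    {
            printf("%u\n", negotiate_wsize(0, 1, 1, 4096));         /* 1048576  */
            printf("%u\n", negotiate_wsize(0, 0, 1, 4096));         /* 126976   */
            printf("%u\n", negotiate_wsize(0, 0, 0, 4096));         /* 65535    */
            printf("%u\n", negotiate_wsize(16 << 20, 1, 1, 4096));  /* 16773120 */
            return 0;
    }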
@@ -2733,8 +2816,8 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon,
2733 return rc; 2816 return rc;
2734} 2817}
2735 2818
2736static void 2819void
2737cleanup_volume_info(struct smb_vol **pvolume_info) 2820cifs_cleanup_volume_info(struct smb_vol **pvolume_info)
2738{ 2821{
2739 struct smb_vol *volume_info; 2822 struct smb_vol *volume_info;
2740 2823
@@ -2764,24 +2847,13 @@ build_unc_path_to_root(const struct smb_vol *volume_info,
2764 char *full_path; 2847 char *full_path;
2765 2848
2766 int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1); 2849 int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1);
2767 full_path = kmalloc(unc_len + cifs_sb->prepathlen + 1, GFP_KERNEL); 2850 full_path = kmalloc(unc_len + 1, GFP_KERNEL);
2768 if (full_path == NULL) 2851 if (full_path == NULL)
2769 return ERR_PTR(-ENOMEM); 2852 return ERR_PTR(-ENOMEM);
2770 2853
2771 strncpy(full_path, volume_info->UNC, unc_len); 2854 strncpy(full_path, volume_info->UNC, unc_len);
2772 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { 2855 full_path[unc_len] = 0; /* add trailing null */
2773 int i; 2856 convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
2774 for (i = 0; i < unc_len; i++) {
2775 if (full_path[i] == '\\')
2776 full_path[i] = '/';
2777 }
2778 }
2779
2780 if (cifs_sb->prepathlen)
2781 strncpy(full_path + unc_len, cifs_sb->prepath,
2782 cifs_sb->prepathlen);
2783
2784 full_path[unc_len + cifs_sb->prepathlen] = 0; /* add trailing null */
2785 return full_path; 2857 return full_path;
2786} 2858}
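build_unc_path_to_root no longer appends the prefix path and no longer hand-rolls the backslash-to-slash loop; it copies the UNC name and calls convert_delimiter with the separator chosen by CIFS_DIR_SEP. Since the static convert_delimiter is deleted from connect.c in the hunk above, the call presumably resolves to a shared helper elsewhere in fs/cifs, which this excerpt does not show. For reference, the removed helper's behaviour, restated compactly:

    /* Behaviour of the helper this code now relies on, restated from the
     * static version removed above; its shared home is an assumption. */
    static void convert_delimiter(char *path, char delim)
    {
            char old_delim = (delim == '/') ? '\\' : '/';
            char *p;

            if (!path)
                    return;
            for (p = path; *p; p++)
                    if (*p == old_delim)
                            *p = delim;
    }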
2787 2859
@@ -2796,7 +2868,7 @@ build_unc_path_to_root(const struct smb_vol *volume_info,
2796 * determine whether there were referrals. 2868 * determine whether there were referrals.
2797 */ 2869 */
2798static int 2870static int
2799expand_dfs_referral(int xid, struct cifsSesInfo *pSesInfo, 2871expand_dfs_referral(int xid, struct cifs_ses *pSesInfo,
2800 struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb, 2872 struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb,
2801 int check_prefix) 2873 int check_prefix)
2802{ 2874{
@@ -2840,40 +2912,13 @@ expand_dfs_referral(int xid, struct cifsSesInfo *pSesInfo,
2840} 2912}
2841#endif 2913#endif
2842 2914
2843int 2915int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
2844cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, 2916 const char *devname)
2845 const char *devname)
2846{ 2917{
2847 int rc;
2848 int xid;
2849 struct smb_vol *volume_info; 2918 struct smb_vol *volume_info;
2850 struct cifsSesInfo *pSesInfo; 2919 int rc = 0;
2851 struct cifsTconInfo *tcon;
2852 struct TCP_Server_Info *srvTcp;
2853 char *full_path;
2854 struct tcon_link *tlink;
2855#ifdef CONFIG_CIFS_DFS_UPCALL
2856 int referral_walks_count = 0;
2857try_mount_again:
2858 /* cleanup activities if we're chasing a referral */
2859 if (referral_walks_count) {
2860 if (tcon)
2861 cifs_put_tcon(tcon);
2862 else if (pSesInfo)
2863 cifs_put_smb_ses(pSesInfo);
2864
2865 cleanup_volume_info(&volume_info);
2866 FreeXid(xid);
2867 }
2868#endif
2869 rc = 0;
2870 tcon = NULL;
2871 pSesInfo = NULL;
2872 srvTcp = NULL;
2873 full_path = NULL;
2874 tlink = NULL;
2875 2920
2876 xid = GetXid(); 2921 *pvolume_info = NULL;
2877 2922
2878 volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL); 2923 volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL);
2879 if (!volume_info) { 2924 if (!volume_info) {
@@ -2881,7 +2926,7 @@ try_mount_again:
2881 goto out; 2926 goto out;
2882 } 2927 }
2883 2928
2884 if (cifs_parse_mount_options(cifs_sb->mountdata, devname, 2929 if (cifs_parse_mount_options(mount_data, devname,
2885 volume_info)) { 2930 volume_info)) {
2886 rc = -EINVAL; 2931 rc = -EINVAL;
2887 goto out; 2932 goto out;
@@ -2914,7 +2959,46 @@ try_mount_again:
2914 goto out; 2959 goto out;
2915 } 2960 }
2916 } 2961 }
2917 cifs_sb->local_nls = volume_info->local_nls; 2962
2963 *pvolume_info = volume_info;
2964 return rc;
2965out:
2966 cifs_cleanup_volume_info(&volume_info);
2967 return rc;
2968}
2969
2970int
2971cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
2972 struct smb_vol *volume_info, const char *devname)
2973{
2974 int rc = 0;
2975 int xid;
2976 struct cifs_ses *pSesInfo;
2977 struct cifs_tcon *tcon;
2978 struct TCP_Server_Info *srvTcp;
2979 char *full_path;
2980 struct tcon_link *tlink;
2981#ifdef CONFIG_CIFS_DFS_UPCALL
2982 int referral_walks_count = 0;
2983try_mount_again:
2984 /* cleanup activities if we're chasing a referral */
2985 if (referral_walks_count) {
2986 if (tcon)
2987 cifs_put_tcon(tcon);
2988 else if (pSesInfo)
2989 cifs_put_smb_ses(pSesInfo);
2990
2991 cifs_cleanup_volume_info(&volume_info);
2992 FreeXid(xid);
2993 }
2994#endif
2995 tcon = NULL;
2996 pSesInfo = NULL;
2997 srvTcp = NULL;
2998 full_path = NULL;
2999 tlink = NULL;
3000
3001 xid = GetXid();
2918 3002
2919 /* get a reference to a tcp session */ 3003 /* get a reference to a tcp session */
2920 srvTcp = cifs_get_tcp_session(volume_info); 3004 srvTcp = cifs_get_tcp_session(volume_info);
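The option-parsing front half of cifs_mount is split out into cifs_setup_volume_info, and cifs_mount now takes the pre-parsed struct smb_vol instead of re-parsing cifs_sb->mountdata. Ownership of volume_info therefore moves to the caller: the cleanup_volume_info call is removed from cifs_mount's out: label further down, and the cleanup routine is exported as cifs_cleanup_volume_info. A sketch of the resulting calling sequence; the surrounding error handling is an assumption about the cifsfs.c side, which is not part of this excerpt:

    /* Sketch of the new calling convention, under stated assumptions. */
    struct smb_vol *volume_info = NULL;
    int rc;

    rc = cifs_setup_volume_info(&volume_info, cifs_sb->mountdata, devname);
    if (rc)
            return rc;

    cifs_setup_cifs_sb(volume_info, cifs_sb);   /* was static setup_cifs_sb() */

    rc = cifs_mount(sb, cifs_sb, volume_info, devname);
    if (rc)
            cERROR(1, "cifs_mount failed w/return code = %d", rc);

    cifs_cleanup_volume_info(&volume_info);     /* caller now owns the vol */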
@@ -2931,7 +3015,6 @@ try_mount_again:
2931 goto mount_fail_check; 3015 goto mount_fail_check;
2932 } 3016 }
2933 3017
2934 setup_cifs_sb(volume_info, cifs_sb);
2935 if (pSesInfo->capabilities & CAP_LARGE_FILES) 3018 if (pSesInfo->capabilities & CAP_LARGE_FILES)
2936 sb->s_maxbytes = MAX_LFS_FILESIZE; 3019 sb->s_maxbytes = MAX_LFS_FILESIZE;
2937 else 3020 else
@@ -2948,35 +3031,36 @@ try_mount_again:
2948 goto remote_path_check; 3031 goto remote_path_check;
2949 } 3032 }
2950 3033
2951 /* do not care if following two calls succeed - informational */
2952 if (!tcon->ipc) {
2953 CIFSSMBQFSDeviceInfo(xid, tcon);
2954 CIFSSMBQFSAttributeInfo(xid, tcon);
2955 }
2956
2957 /* tell server which Unix caps we support */ 3034 /* tell server which Unix caps we support */
2958 if (tcon->ses->capabilities & CAP_UNIX) 3035 if (tcon->ses->capabilities & CAP_UNIX) {
2959 /* reset of caps checks mount to see if unix extensions 3036 /* reset of caps checks mount to see if unix extensions
2960 disabled for just this mount */ 3037 disabled for just this mount */
2961 reset_cifs_unix_caps(xid, tcon, sb, volume_info); 3038 reset_cifs_unix_caps(xid, tcon, sb, volume_info);
2962 else 3039 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
3040 (le64_to_cpu(tcon->fsUnixInfo.Capability) &
3041 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
3042 rc = -EACCES;
3043 goto mount_fail_check;
3044 }
3045 } else
2963 tcon->unix_ext = 0; /* server does not support them */ 3046 tcon->unix_ext = 0; /* server does not support them */
2964 3047
2965 /* convert forward to back slashes in prepath here if needed */ 3048 /* do not care if following two calls succeed - informational */
2966 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) 3049 if (!tcon->ipc) {
2967 convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); 3050 CIFSSMBQFSDeviceInfo(xid, tcon);
3051 CIFSSMBQFSAttributeInfo(xid, tcon);
3052 }
2968 3053
2969 if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { 3054 if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
2970 cifs_sb->rsize = 1024 * 127; 3055 cifs_sb->rsize = 1024 * 127;
2971 cFYI(DBG2, "no very large read support, rsize now 127K"); 3056 cFYI(DBG2, "no very large read support, rsize now 127K");
2972 } 3057 }
2973 if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X))
2974 cifs_sb->wsize = min(cifs_sb->wsize,
2975 (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
2976 if (!(tcon->ses->capabilities & CAP_LARGE_READ_X)) 3058 if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
2977 cifs_sb->rsize = min(cifs_sb->rsize, 3059 cifs_sb->rsize = min(cifs_sb->rsize,
2978 (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); 3060 (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
2979 3061
3062 cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info);
3063
2980remote_path_check: 3064remote_path_check:
2981#ifdef CONFIG_CIFS_DFS_UPCALL 3065#ifdef CONFIG_CIFS_DFS_UPCALL
2982 /* 3066 /*
@@ -2996,10 +3080,10 @@ remote_path_check:
2996 } 3080 }
2997#endif 3081#endif
2998 3082
2999 /* check if a whole path (including prepath) is not remote */ 3083 /* check if a whole path is not remote */
3000 if (!rc && tcon) { 3084 if (!rc && tcon) {
3001 /* build_path_to_root works only when we have a valid tcon */ 3085 /* build_path_to_root works only when we have a valid tcon */
3002 full_path = cifs_build_path_to_root(cifs_sb, tcon); 3086 full_path = cifs_build_path_to_root(volume_info, cifs_sb, tcon);
3003 if (full_path == NULL) { 3087 if (full_path == NULL) {
3004 rc = -ENOMEM; 3088 rc = -ENOMEM;
3005 goto mount_fail_check; 3089 goto mount_fail_check;
@@ -3025,10 +3109,6 @@ remote_path_check:
3025 rc = -ELOOP; 3109 rc = -ELOOP;
3026 goto mount_fail_check; 3110 goto mount_fail_check;
3027 } 3111 }
3028 /* convert forward to back slashes in prepath here if needed */
3029 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
3030 convert_delimiter(cifs_sb->prepath,
3031 CIFS_DIR_SEP(cifs_sb));
3032 3112
3033 rc = expand_dfs_referral(xid, pSesInfo, volume_info, cifs_sb, 3113 rc = expand_dfs_referral(xid, pSesInfo, volume_info, cifs_sb,
3034 true); 3114 true);
@@ -3087,14 +3167,13 @@ mount_fail_check:
3087 password will be freed at unmount time) */ 3167 password will be freed at unmount time) */
3088out: 3168out:
3089 /* zero out password before freeing */ 3169 /* zero out password before freeing */
3090 cleanup_volume_info(&volume_info);
3091 FreeXid(xid); 3170 FreeXid(xid);
3092 return rc; 3171 return rc;
3093} 3172}
3094 3173
3095int 3174int
3096CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, 3175CIFSTCon(unsigned int xid, struct cifs_ses *ses,
3097 const char *tree, struct cifsTconInfo *tcon, 3176 const char *tree, struct cifs_tcon *tcon,
3098 const struct nls_table *nls_codepage) 3177 const struct nls_table *nls_codepage)
3099{ 3178{
3100 struct smb_hdr *smb_buffer; 3179 struct smb_hdr *smb_buffer;
@@ -3126,7 +3205,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3126 pSMB->AndXCommand = 0xFF; 3205 pSMB->AndXCommand = 0xFF;
3127 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); 3206 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
3128 bcc_ptr = &pSMB->Password[0]; 3207 bcc_ptr = &pSMB->Password[0];
3129 if ((ses->server->secMode) & SECMODE_USER) { 3208 if ((ses->server->sec_mode) & SECMODE_USER) {
3130 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ 3209 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
3131 *bcc_ptr = 0; /* password is null byte */ 3210 *bcc_ptr = 0; /* password is null byte */
3132 bcc_ptr++; /* skip password */ 3211 bcc_ptr++; /* skip password */
@@ -3143,7 +3222,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3143 if ((global_secflags & CIFSSEC_MAY_LANMAN) && 3222 if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
3144 (ses->server->secType == LANMAN)) 3223 (ses->server->secType == LANMAN))
3145 calc_lanman_hash(tcon->password, ses->server->cryptkey, 3224 calc_lanman_hash(tcon->password, ses->server->cryptkey,
3146 ses->server->secMode & 3225 ses->server->sec_mode &
3147 SECMODE_PW_ENCRYPT ? true : false, 3226 SECMODE_PW_ENCRYPT ? true : false,
3148 bcc_ptr); 3227 bcc_ptr);
3149 else 3228 else
@@ -3159,7 +3238,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3159 } 3238 }
3160 } 3239 }
3161 3240
3162 if (ses->server->secMode & 3241 if (ses->server->sec_mode &
3163 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 3242 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
3164 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 3243 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
3165 3244
@@ -3255,7 +3334,6 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
3255 struct rb_root *root = &cifs_sb->tlink_tree; 3334 struct rb_root *root = &cifs_sb->tlink_tree;
3256 struct rb_node *node; 3335 struct rb_node *node;
3257 struct tcon_link *tlink; 3336 struct tcon_link *tlink;
3258 char *tmp;
3259 3337
3260 cancel_delayed_work_sync(&cifs_sb->prune_tlinks); 3338 cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
3261 3339
@@ -3272,15 +3350,10 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
3272 } 3350 }
3273 spin_unlock(&cifs_sb->tlink_tree_lock); 3351 spin_unlock(&cifs_sb->tlink_tree_lock);
3274 3352
3275 tmp = cifs_sb->prepath;
3276 cifs_sb->prepathlen = 0;
3277 cifs_sb->prepath = NULL;
3278 kfree(tmp);
3279
3280 return 0; 3353 return 0;
3281} 3354}
3282 3355
3283int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses) 3356int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
3284{ 3357{
3285 int rc = 0; 3358 int rc = 0;
3286 struct TCP_Server_Info *server = ses->server; 3359 struct TCP_Server_Info *server = ses->server;
@@ -3310,7 +3383,7 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
3310} 3383}
3311 3384
3312 3385
3313int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses, 3386int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
3314 struct nls_table *nls_info) 3387 struct nls_table *nls_info)
3315{ 3388{
3316 int rc = 0; 3389 int rc = 0;
@@ -3322,7 +3395,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
3322 ses->capabilities &= (~CAP_UNIX); 3395 ses->capabilities &= (~CAP_UNIX);
3323 3396
3324 cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d", 3397 cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
3325 server->secMode, server->capabilities, server->timeAdj); 3398 server->sec_mode, server->capabilities, server->timeAdj);
3326 3399
3327 rc = CIFS_SessSetup(xid, ses, nls_info); 3400 rc = CIFS_SessSetup(xid, ses, nls_info);
3328 if (rc) { 3401 if (rc) {
@@ -3354,12 +3427,12 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
3354 return rc; 3427 return rc;
3355} 3428}
3356 3429
3357static struct cifsTconInfo * 3430static struct cifs_tcon *
3358cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) 3431cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
3359{ 3432{
3360 struct cifsTconInfo *master_tcon = cifs_sb_master_tcon(cifs_sb); 3433 struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
3361 struct cifsSesInfo *ses; 3434 struct cifs_ses *ses;
3362 struct cifsTconInfo *tcon = NULL; 3435 struct cifs_tcon *tcon = NULL;
3363 struct smb_vol *vol_info; 3436 struct smb_vol *vol_info;
3364 char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */ 3437 char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */
3365 /* We used to have this as MAX_USERNAME which is */ 3438 /* We used to have this as MAX_USERNAME which is */
@@ -3392,7 +3465,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
3392 3465
3393 ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info); 3466 ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
3394 if (IS_ERR(ses)) { 3467 if (IS_ERR(ses)) {
3395 tcon = (struct cifsTconInfo *)ses; 3468 tcon = (struct cifs_tcon *)ses;
3396 cifs_put_tcp_session(master_tcon->ses->server); 3469 cifs_put_tcp_session(master_tcon->ses->server);
3397 goto out; 3470 goto out;
3398 } 3471 }
@@ -3417,7 +3490,7 @@ cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
3417 return cifs_sb->master_tlink; 3490 return cifs_sb->master_tlink;
3418} 3491}
3419 3492
3420struct cifsTconInfo * 3493struct cifs_tcon *
3421cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) 3494cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
3422{ 3495{
3423 return tlink_tcon(cifs_sb_master_tlink(cifs_sb)); 3496 return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 9ea65cf36714..81914df47ef1 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -50,12 +50,11 @@ build_path_from_dentry(struct dentry *direntry)
50{ 50{
51 struct dentry *temp; 51 struct dentry *temp;
52 int namelen; 52 int namelen;
53 int pplen;
54 int dfsplen; 53 int dfsplen;
55 char *full_path; 54 char *full_path;
56 char dirsep; 55 char dirsep;
57 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); 56 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
58 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); 57 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
59 58
60 if (direntry == NULL) 59 if (direntry == NULL)
61 return NULL; /* not much we can do if dentry is freed and 60 return NULL; /* not much we can do if dentry is freed and
@@ -63,13 +62,12 @@ build_path_from_dentry(struct dentry *direntry)
63 when the server crashed */ 62 when the server crashed */
64 63
65 dirsep = CIFS_DIR_SEP(cifs_sb); 64 dirsep = CIFS_DIR_SEP(cifs_sb);
66 pplen = cifs_sb->prepathlen;
67 if (tcon->Flags & SMB_SHARE_IS_IN_DFS) 65 if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
68 dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); 66 dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
69 else 67 else
70 dfsplen = 0; 68 dfsplen = 0;
71cifs_bp_rename_retry: 69cifs_bp_rename_retry:
72 namelen = pplen + dfsplen; 70 namelen = dfsplen;
73 for (temp = direntry; !IS_ROOT(temp);) { 71 for (temp = direntry; !IS_ROOT(temp);) {
74 namelen += (1 + temp->d_name.len); 72 namelen += (1 + temp->d_name.len);
75 temp = temp->d_parent; 73 temp = temp->d_parent;
@@ -100,7 +98,7 @@ cifs_bp_rename_retry:
100 return NULL; 98 return NULL;
101 } 99 }
102 } 100 }
103 if (namelen != pplen + dfsplen) { 101 if (namelen != dfsplen) {
104 cERROR(1, "did not end path lookup where expected namelen is %d", 102 cERROR(1, "did not end path lookup where expected namelen is %d",
105 namelen); 103 namelen);
106 /* presumably this is only possible if racing with a rename 104 /* presumably this is only possible if racing with a rename
@@ -126,7 +124,6 @@ cifs_bp_rename_retry:
126 } 124 }
127 } 125 }
128 } 126 }
129 strncpy(full_path + dfsplen, CIFS_SB(direntry->d_sb)->prepath, pplen);
130 return full_path; 127 return full_path;
131} 128}
132 129
@@ -152,7 +149,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
152 __u16 fileHandle; 149 __u16 fileHandle;
153 struct cifs_sb_info *cifs_sb; 150 struct cifs_sb_info *cifs_sb;
154 struct tcon_link *tlink; 151 struct tcon_link *tlink;
155 struct cifsTconInfo *tcon; 152 struct cifs_tcon *tcon;
156 char *full_path = NULL; 153 char *full_path = NULL;
157 FILE_ALL_INFO *buf = NULL; 154 FILE_ALL_INFO *buf = NULL;
158 struct inode *newinode = NULL; 155 struct inode *newinode = NULL;
@@ -356,7 +353,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
356 int xid; 353 int xid;
357 struct cifs_sb_info *cifs_sb; 354 struct cifs_sb_info *cifs_sb;
358 struct tcon_link *tlink; 355 struct tcon_link *tlink;
359 struct cifsTconInfo *pTcon; 356 struct cifs_tcon *pTcon;
357 struct cifs_io_parms io_parms;
360 char *full_path = NULL; 358 char *full_path = NULL;
361 struct inode *newinode = NULL; 359 struct inode *newinode = NULL;
362 int oplock = 0; 360 int oplock = 0;
@@ -439,16 +437,19 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
439 * timestamps in, but we can reuse it safely */ 437 * timestamps in, but we can reuse it safely */
440 438
441 pdev = (struct win_dev *)buf; 439 pdev = (struct win_dev *)buf;
440 io_parms.netfid = fileHandle;
441 io_parms.pid = current->tgid;
442 io_parms.tcon = pTcon;
443 io_parms.offset = 0;
444 io_parms.length = sizeof(struct win_dev);
442 if (S_ISCHR(mode)) { 445 if (S_ISCHR(mode)) {
443 memcpy(pdev->type, "IntxCHR", 8); 446 memcpy(pdev->type, "IntxCHR", 8);
444 pdev->major = 447 pdev->major =
445 cpu_to_le64(MAJOR(device_number)); 448 cpu_to_le64(MAJOR(device_number));
446 pdev->minor = 449 pdev->minor =
447 cpu_to_le64(MINOR(device_number)); 450 cpu_to_le64(MINOR(device_number));
448 rc = CIFSSMBWrite(xid, pTcon, 451 rc = CIFSSMBWrite(xid, &io_parms,
449 fileHandle, 452 &bytes_written, (char *)pdev,
450 sizeof(struct win_dev),
451 0, &bytes_written, (char *)pdev,
452 NULL, 0); 453 NULL, 0);
453 } else if (S_ISBLK(mode)) { 454 } else if (S_ISBLK(mode)) {
454 memcpy(pdev->type, "IntxBLK", 8); 455 memcpy(pdev->type, "IntxBLK", 8);
@@ -456,10 +457,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
456 cpu_to_le64(MAJOR(device_number)); 457 cpu_to_le64(MAJOR(device_number));
457 pdev->minor = 458 pdev->minor =
458 cpu_to_le64(MINOR(device_number)); 459 cpu_to_le64(MINOR(device_number));
459 rc = CIFSSMBWrite(xid, pTcon, 460 rc = CIFSSMBWrite(xid, &io_parms,
460 fileHandle, 461 &bytes_written, (char *)pdev,
461 sizeof(struct win_dev),
462 0, &bytes_written, (char *)pdev,
463 NULL, 0); 462 NULL, 0);
464 } /* else if (S_ISFIFO) */ 463 } /* else if (S_ISFIFO) */
465 CIFSSMBClose(xid, pTcon, fileHandle); 464 CIFSSMBClose(xid, pTcon, fileHandle);
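CIFSSMBWrite switches from a long positional argument list (netfid, count, offset, and so on) to a single struct cifs_io_parms describing the I/O, which is what lets the mknod path above pass an explicit pid alongside the tcon, fid, offset and length. The struct's real definition lives outside this excerpt; the sketch below lists only the fields assigned here, with types inferred from the surrounding code:

    /* Field set inferred from the assignments above; the actual definition
     * (and any extra members) is not part of this excerpt. */
    struct cifs_io_parms {
            __u16 netfid;
            __u32 pid;
            __u64 offset;
            unsigned int length;
            struct cifs_tcon *tcon;
    };

    /* New-style call, as used above for the device-node payload: */
    io_parms.netfid = fileHandle;
    io_parms.pid    = current->tgid;
    io_parms.tcon   = pTcon;
    io_parms.offset = 0;
    io_parms.length = sizeof(struct win_dev);
    rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, (char *)pdev, NULL, 0);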
@@ -486,7 +485,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
486 bool posix_open = false; 485 bool posix_open = false;
487 struct cifs_sb_info *cifs_sb; 486 struct cifs_sb_info *cifs_sb;
488 struct tcon_link *tlink; 487 struct tcon_link *tlink;
489 struct cifsTconInfo *pTcon; 488 struct cifs_tcon *pTcon;
490 struct cifsFileInfo *cfile; 489 struct cifsFileInfo *cfile;
491 struct inode *newInode = NULL; 490 struct inode *newInode = NULL;
492 char *full_path = NULL; 491 char *full_path = NULL;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c672afef0c09..bb71471a4d9d 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -114,7 +114,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
114 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 114 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
115 struct cifs_fattr fattr; 115 struct cifs_fattr fattr;
116 struct tcon_link *tlink; 116 struct tcon_link *tlink;
117 struct cifsTconInfo *tcon; 117 struct cifs_tcon *tcon;
118 118
119 cFYI(1, "posix open %s", full_path); 119 cFYI(1, "posix open %s", full_path);
120 120
@@ -168,7 +168,7 @@ posix_open_ret:
168 168
169static int 169static int
170cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, 170cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
171 struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock, 171 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
172 __u16 *pnetfid, int xid) 172 __u16 *pnetfid, int xid)
173{ 173{
174 int rc; 174 int rc;
@@ -285,7 +285,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
285void cifsFileInfo_put(struct cifsFileInfo *cifs_file) 285void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
286{ 286{
287 struct inode *inode = cifs_file->dentry->d_inode; 287 struct inode *inode = cifs_file->dentry->d_inode;
288 struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink); 288 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
289 struct cifsInodeInfo *cifsi = CIFS_I(inode); 289 struct cifsInodeInfo *cifsi = CIFS_I(inode);
290 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 290 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
291 struct cifsLockInfo *li, *tmp; 291 struct cifsLockInfo *li, *tmp;
@@ -343,7 +343,7 @@ int cifs_open(struct inode *inode, struct file *file)
343 int xid; 343 int xid;
344 __u32 oplock; 344 __u32 oplock;
345 struct cifs_sb_info *cifs_sb; 345 struct cifs_sb_info *cifs_sb;
346 struct cifsTconInfo *tcon; 346 struct cifs_tcon *tcon;
347 struct tcon_link *tlink; 347 struct tcon_link *tlink;
348 struct cifsFileInfo *pCifsFile = NULL; 348 struct cifsFileInfo *pCifsFile = NULL;
349 char *full_path = NULL; 349 char *full_path = NULL;
@@ -457,7 +457,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
457 int xid; 457 int xid;
458 __u32 oplock; 458 __u32 oplock;
459 struct cifs_sb_info *cifs_sb; 459 struct cifs_sb_info *cifs_sb;
460 struct cifsTconInfo *tcon; 460 struct cifs_tcon *tcon;
461 struct cifsInodeInfo *pCifsInode; 461 struct cifsInodeInfo *pCifsInode;
462 struct inode *inode; 462 struct inode *inode;
463 char *full_path = NULL; 463 char *full_path = NULL;
@@ -596,7 +596,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
596 xid = GetXid(); 596 xid = GetXid();
597 597
598 if (pCFileStruct) { 598 if (pCFileStruct) {
599 struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink); 599 struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);
600 600
601 cFYI(1, "Freeing private data in close dir"); 601 cFYI(1, "Freeing private data in close dir");
602 spin_lock(&cifs_file_list_lock); 602 spin_lock(&cifs_file_list_lock);
@@ -653,7 +653,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
653 __u64 length; 653 __u64 length;
654 bool wait_flag = false; 654 bool wait_flag = false;
655 struct cifs_sb_info *cifs_sb; 655 struct cifs_sb_info *cifs_sb;
656 struct cifsTconInfo *tcon; 656 struct cifs_tcon *tcon;
657 __u16 netfid; 657 __u16 netfid;
658 __u8 lockType = LOCKING_ANDX_LARGE_FILES; 658 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
659 bool posix_locking = 0; 659 bool posix_locking = 0;
@@ -725,8 +725,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
725 else 725 else
726 posix_lock_type = CIFS_WRLCK; 726 posix_lock_type = CIFS_WRLCK;
727 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */, 727 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
728 length, pfLock, 728 length, pfLock, posix_lock_type,
729 posix_lock_type, wait_flag); 729 wait_flag);
730 FreeXid(xid); 730 FreeXid(xid);
731 return rc; 731 return rc;
732 } 732 }
@@ -797,8 +797,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
797 posix_lock_type = CIFS_UNLCK; 797 posix_lock_type = CIFS_UNLCK;
798 798
799 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */, 799 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
800 length, pfLock, 800 length, pfLock, posix_lock_type,
801 posix_lock_type, wait_flag); 801 wait_flag);
802 } else { 802 } else {
803 struct cifsFileInfo *fid = file->private_data; 803 struct cifsFileInfo *fid = file->private_data;
804 804
@@ -857,7 +857,7 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
857 cifsi->server_eof = end_of_write; 857 cifsi->server_eof = end_of_write;
858} 858}
859 859
860static ssize_t cifs_write(struct cifsFileInfo *open_file, 860static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
861 const char *write_data, size_t write_size, 861 const char *write_data, size_t write_size,
862 loff_t *poffset) 862 loff_t *poffset)
863{ 863{
@@ -865,10 +865,11 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
865 unsigned int bytes_written = 0; 865 unsigned int bytes_written = 0;
866 unsigned int total_written; 866 unsigned int total_written;
867 struct cifs_sb_info *cifs_sb; 867 struct cifs_sb_info *cifs_sb;
868 struct cifsTconInfo *pTcon; 868 struct cifs_tcon *pTcon;
869 int xid; 869 int xid;
870 struct dentry *dentry = open_file->dentry; 870 struct dentry *dentry = open_file->dentry;
871 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode); 871 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
872 struct cifs_io_parms io_parms;
872 873
873 cifs_sb = CIFS_SB(dentry->d_sb); 874 cifs_sb = CIFS_SB(dentry->d_sb);
874 875
@@ -901,8 +902,13 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
901 /* iov[0] is reserved for smb header */ 902 /* iov[0] is reserved for smb header */
902 iov[1].iov_base = (char *)write_data + total_written; 903 iov[1].iov_base = (char *)write_data + total_written;
903 iov[1].iov_len = len; 904 iov[1].iov_len = len;
904 rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len, 905 io_parms.netfid = open_file->netfid;
905 *poffset, &bytes_written, iov, 1, 0); 906 io_parms.pid = pid;
907 io_parms.tcon = pTcon;
908 io_parms.offset = *poffset;
909 io_parms.length = len;
910 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
911 1, 0);
906 } 912 }
907 if (rc || (bytes_written == 0)) { 913 if (rc || (bytes_written == 0)) {
908 if (total_written) 914 if (total_written)
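
The change above is the core of the I/O-path rework in this patch: instead of passing netfid, length, offset and an implicit pid separately, callers fill a struct cifs_io_parms (netfid, pid, tcon, offset and length are the fields visible in this diff) and hand one pointer to CIFSSMBRead/CIFSSMBWrite/CIFSSMBWrite2. A compact userspace sketch of the pattern, with illustrative types and a stub in place of the real transport call:

    /*
     * Parameter-bundle sketch: one descriptor per request instead of a
     * growing list of positional arguments.  Types and send_write() are
     * stand-ins, not the real CIFSSMBWrite2() signature.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct io_parms_sketch {
            uint16_t netfid;        /* open file handle */
            uint32_t pid;           /* pid stamped into the SMB header */
            void    *tcon;          /* tree connection (opaque here) */
            uint64_t offset;        /* file offset of this chunk */
            uint32_t length;        /* bytes to transfer */
    };

    static int send_write(const struct io_parms_sketch *io,
                          const void *buf, uint32_t *written)
    {
            (void)buf;                      /* a real sender would map and copy */
            *written = io->length;          /* pretend the full chunk went out */
            printf("fid %u pid %u off %llu len %u\n",
                   (unsigned)io->netfid, (unsigned)io->pid,
                   (unsigned long long)io->offset, (unsigned)io->length);
            return 0;
    }

    int main(void)
    {
            struct io_parms_sketch io = {
                    .netfid = 42, .pid = 1234, .tcon = NULL,
                    .offset = 0,  .length = 4096,
            };
            uint32_t written = 0;
            const char buf[4096] = { 0 };

            return send_write(&io, buf, &written);
    }

Bundling the arguments keeps the transport signatures stable as per-request fields such as the pid are added.
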
@@ -1071,8 +1077,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1071 1077
1072 open_file = find_writable_file(CIFS_I(mapping->host), false); 1078 open_file = find_writable_file(CIFS_I(mapping->host), false);
1073 if (open_file) { 1079 if (open_file) {
1074 bytes_written = cifs_write(open_file, write_data, 1080 bytes_written = cifs_write(open_file, open_file->pid,
1075 to - from, &offset); 1081 write_data, to - from, &offset);
1076 cifsFileInfo_put(open_file); 1082 cifsFileInfo_put(open_file);
1077 /* Does mm or vfs already set times? */ 1083 /* Does mm or vfs already set times? */
1078 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb); 1084 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
@@ -1092,58 +1098,20 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1092static int cifs_writepages(struct address_space *mapping, 1098static int cifs_writepages(struct address_space *mapping,
1093 struct writeback_control *wbc) 1099 struct writeback_control *wbc)
1094{ 1100{
1095 unsigned int bytes_to_write; 1101 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1096 unsigned int bytes_written; 1102 bool done = false, scanned = false, range_whole = false;
1097 struct cifs_sb_info *cifs_sb; 1103 pgoff_t end, index;
1098 int done = 0; 1104 struct cifs_writedata *wdata;
1099 pgoff_t end;
1100 pgoff_t index;
1101 int range_whole = 0;
1102 struct kvec *iov;
1103 int len;
1104 int n_iov = 0;
1105 pgoff_t next;
1106 int nr_pages;
1107 __u64 offset = 0;
1108 struct cifsFileInfo *open_file;
1109 struct cifsTconInfo *tcon;
1110 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
1111 struct page *page; 1105 struct page *page;
1112 struct pagevec pvec;
1113 int rc = 0; 1106 int rc = 0;
1114 int scanned = 0;
1115 int xid;
1116
1117 cifs_sb = CIFS_SB(mapping->host->i_sb);
1118 1107
1119 /* 1108 /*
1120 * If wsize is smaller that the page cache size, default to writing 1109 * If wsize is smaller than the page cache size, default to writing
1121 * one page at a time via cifs_writepage 1110 * one page at a time via cifs_writepage
1122 */ 1111 */
1123 if (cifs_sb->wsize < PAGE_CACHE_SIZE) 1112 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1124 return generic_writepages(mapping, wbc); 1113 return generic_writepages(mapping, wbc);
1125 1114
1126 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1127 if (iov == NULL)
1128 return generic_writepages(mapping, wbc);
1129
1130 /*
1131 * if there's no open file, then this is likely to fail too,
1132 * but it'll at least handle the return. Maybe it should be
1133 * a BUG() instead?
1134 */
1135 open_file = find_writable_file(CIFS_I(mapping->host), false);
1136 if (!open_file) {
1137 kfree(iov);
1138 return generic_writepages(mapping, wbc);
1139 }
1140
1141 tcon = tlink_tcon(open_file->tlink);
1142 cifsFileInfo_put(open_file);
1143
1144 xid = GetXid();
1145
1146 pagevec_init(&pvec, 0);
1147 if (wbc->range_cyclic) { 1115 if (wbc->range_cyclic) {
1148 index = mapping->writeback_index; /* Start from prev offset */ 1116 index = mapping->writeback_index; /* Start from prev offset */
1149 end = -1; 1117 end = -1;
@@ -1151,24 +1119,49 @@ static int cifs_writepages(struct address_space *mapping,
1151 index = wbc->range_start >> PAGE_CACHE_SHIFT; 1119 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1152 end = wbc->range_end >> PAGE_CACHE_SHIFT; 1120 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1153 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 1121 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1154 range_whole = 1; 1122 range_whole = true;
1155 scanned = 1; 1123 scanned = true;
1156 } 1124 }
1157retry: 1125retry:
1158 while (!done && (index <= end) && 1126 while (!done && index <= end) {
1159 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, 1127 unsigned int i, nr_pages, found_pages;
1160 PAGECACHE_TAG_DIRTY, 1128 pgoff_t next = 0, tofind;
1161 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) { 1129 struct page **pages;
1162 int first; 1130
1163 unsigned int i; 1131 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1164 1132 end - index) + 1;
1165 first = -1; 1133
1166 next = 0; 1134 wdata = cifs_writedata_alloc((unsigned int)tofind);
1167 n_iov = 0; 1135 if (!wdata) {
1168 bytes_to_write = 0; 1136 rc = -ENOMEM;
1169 1137 break;
1170 for (i = 0; i < nr_pages; i++) { 1138 }
1171 page = pvec.pages[i]; 1139
1140 /*
1141 * find_get_pages_tag seems to return a max of 256 on each
1142 * iteration, so we must call it several times in order to
1143 * fill the array or the wsize is effectively limited to
1144 * 256 * PAGE_CACHE_SIZE.
1145 */
1146 found_pages = 0;
1147 pages = wdata->pages;
1148 do {
1149 nr_pages = find_get_pages_tag(mapping, &index,
1150 PAGECACHE_TAG_DIRTY,
1151 tofind, pages);
1152 found_pages += nr_pages;
1153 tofind -= nr_pages;
1154 pages += nr_pages;
1155 } while (nr_pages && tofind && index <= end);
1156
1157 if (found_pages == 0) {
1158 kref_put(&wdata->refcount, cifs_writedata_release);
1159 break;
1160 }
1161
1162 nr_pages = 0;
1163 for (i = 0; i < found_pages; i++) {
1164 page = wdata->pages[i];
1172 /* 1165 /*
1173 * At this point we hold neither mapping->tree_lock nor 1166 * At this point we hold neither mapping->tree_lock nor
1174 * lock on the page itself: the page may be truncated or 1167 * lock on the page itself: the page may be truncated or
@@ -1177,7 +1170,7 @@ retry:
1177 * mapping 1170 * mapping
1178 */ 1171 */
1179 1172
1180 if (first < 0) 1173 if (nr_pages == 0)
1181 lock_page(page); 1174 lock_page(page);
1182 else if (!trylock_page(page)) 1175 else if (!trylock_page(page))
1183 break; 1176 break;
@@ -1188,7 +1181,7 @@ retry:
1188 } 1181 }
1189 1182
1190 if (!wbc->range_cyclic && page->index > end) { 1183 if (!wbc->range_cyclic && page->index > end) {
1191 done = 1; 1184 done = true;
1192 unlock_page(page); 1185 unlock_page(page);
1193 break; 1186 break;
1194 } 1187 }
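
Two hunks up, the rewritten gathering loop calls find_get_pages_tag() repeatedly because, as the new comment notes, each call returns at most 256 pages; without the inner do/while the effective wsize would be capped at 256 * PAGE_CACHE_SIZE. A standalone sketch of that batching idea, with a mock pager in place of the kernel API:

    /*
     * Batched lookup sketch: the pager caps each call at 256 results, so
     * the caller loops until it has "tofind" entries or the source is
     * exhausted.  mock_get_pages() is a stand-in, not a kernel API.
     */
    #include <stdio.h>

    #define BATCH_CAP 256

    /* pretend there are `remaining` dirty pages left past *index */
    static unsigned int mock_get_pages(unsigned long *index, unsigned int want,
                                       unsigned int *remaining)
    {
            unsigned int got = want;

            if (got > BATCH_CAP)
                    got = BATCH_CAP;
            if (got > *remaining)
                    got = *remaining;
            *index += got;
            *remaining -= got;
            return got;
    }

    int main(void)
    {
            unsigned long index = 0;
            unsigned int remaining = 1000;  /* dirty pages available */
            unsigned int tofind = 640;      /* e.g. wsize / page size */
            unsigned int found = 0, nr, calls = 0;

            do {
                    nr = mock_get_pages(&index, tofind - found, &remaining);
                    found += nr;
                    calls++;
            } while (nr && found < tofind);

            printf("gathered %u pages in %u calls\n", found, calls);
            return 0;
    }
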
@@ -1215,119 +1208,89 @@ retry:
1215 set_page_writeback(page); 1208 set_page_writeback(page);
1216 1209
1217 if (page_offset(page) >= mapping->host->i_size) { 1210 if (page_offset(page) >= mapping->host->i_size) {
1218 done = 1; 1211 done = true;
1219 unlock_page(page); 1212 unlock_page(page);
1220 end_page_writeback(page); 1213 end_page_writeback(page);
1221 break; 1214 break;
1222 } 1215 }
1223 1216
1224 /* 1217 wdata->pages[i] = page;
1225 * BB can we get rid of this? pages are held by pvec 1218 next = page->index + 1;
1226 */ 1219 ++nr_pages;
1227 page_cache_get(page); 1220 }
1228 1221
1229 len = min(mapping->host->i_size - page_offset(page), 1222 /* reset index to refind any pages skipped */
1230 (loff_t)PAGE_CACHE_SIZE); 1223 if (nr_pages == 0)
1224 index = wdata->pages[0]->index + 1;
1231 1225
1232 /* reserve iov[0] for the smb header */ 1226 /* put any pages we aren't going to use */
1233 n_iov++; 1227 for (i = nr_pages; i < found_pages; i++) {
1234 iov[n_iov].iov_base = kmap(page); 1228 page_cache_release(wdata->pages[i]);
1235 iov[n_iov].iov_len = len; 1229 wdata->pages[i] = NULL;
1236 bytes_to_write += len; 1230 }
1237 1231
1238 if (first < 0) { 1232 /* nothing to write? */
1239 first = i; 1233 if (nr_pages == 0) {
1240 offset = page_offset(page); 1234 kref_put(&wdata->refcount, cifs_writedata_release);
1241 } 1235 continue;
1242 next = page->index + 1;
1243 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1244 break;
1245 } 1236 }
1246 if (n_iov) {
1247retry_write:
1248 open_file = find_writable_file(CIFS_I(mapping->host),
1249 false);
1250 if (!open_file) {
1251 cERROR(1, "No writable handles for inode");
1252 rc = -EBADF;
1253 } else {
1254 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
1255 bytes_to_write, offset,
1256 &bytes_written, iov, n_iov,
1257 0);
1258 cifsFileInfo_put(open_file);
1259 }
1260 1237
1261 cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written); 1238 wdata->sync_mode = wbc->sync_mode;
1239 wdata->nr_pages = nr_pages;
1240 wdata->offset = page_offset(wdata->pages[0]);
1262 1241
1263 /* 1242 do {
1264 * For now, treat a short write as if nothing got 1243 if (wdata->cfile != NULL)
1265 * written. A zero length write however indicates 1244 cifsFileInfo_put(wdata->cfile);
1266 * ENOSPC or EFBIG. We have no way to know which 1245 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1267 * though, so call it ENOSPC for now. EFBIG would 1246 false);
1268 * get translated to AS_EIO anyway. 1247 if (!wdata->cfile) {
1269 * 1248 cERROR(1, "No writable handles for inode");
1270 * FIXME: make it take into account the data that did 1249 rc = -EBADF;
1271 * get written 1250 break;
1272 */
1273 if (rc == 0) {
1274 if (bytes_written == 0)
1275 rc = -ENOSPC;
1276 else if (bytes_written < bytes_to_write)
1277 rc = -EAGAIN;
1278 } 1251 }
1252 rc = cifs_async_writev(wdata);
1253 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
1279 1254
1280 /* retry on data-integrity flush */ 1255 for (i = 0; i < nr_pages; ++i)
1281 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) 1256 unlock_page(wdata->pages[i]);
1282 goto retry_write;
1283
1284 /* fix the stats and EOF */
1285 if (bytes_written > 0) {
1286 cifs_stats_bytes_written(tcon, bytes_written);
1287 cifs_update_eof(cifsi, offset, bytes_written);
1288 }
1289 1257
1290 for (i = 0; i < n_iov; i++) { 1258 /* send failure -- clean up the mess */
1291 page = pvec.pages[first + i]; 1259 if (rc != 0) {
1292 /* on retryable write error, redirty page */ 1260 for (i = 0; i < nr_pages; ++i) {
1293 if (rc == -EAGAIN) 1261 if (rc == -EAGAIN)
1294 redirty_page_for_writepage(wbc, page); 1262 redirty_page_for_writepage(wbc,
1295 else if (rc != 0) 1263 wdata->pages[i]);
1296 SetPageError(page); 1264 else
1297 kunmap(page); 1265 SetPageError(wdata->pages[i]);
1298 unlock_page(page); 1266 end_page_writeback(wdata->pages[i]);
1299 end_page_writeback(page); 1267 page_cache_release(wdata->pages[i]);
1300 page_cache_release(page);
1301 } 1268 }
1302
1303 if (rc != -EAGAIN) 1269 if (rc != -EAGAIN)
1304 mapping_set_error(mapping, rc); 1270 mapping_set_error(mapping, rc);
1305 else 1271 }
1306 rc = 0; 1272 kref_put(&wdata->refcount, cifs_writedata_release);
1307 1273
1308 if ((wbc->nr_to_write -= n_iov) <= 0) 1274 wbc->nr_to_write -= nr_pages;
1309 done = 1; 1275 if (wbc->nr_to_write <= 0)
1310 index = next; 1276 done = true;
1311 } else
1312 /* Need to re-find the pages we skipped */
1313 index = pvec.pages[0]->index + 1;
1314 1277
1315 pagevec_release(&pvec); 1278 index = next;
1316 } 1279 }
1280
1317 if (!scanned && !done) { 1281 if (!scanned && !done) {
1318 /* 1282 /*
1319 * We hit the last page and there is more work to be done: wrap 1283 * We hit the last page and there is more work to be done: wrap
1320 * back to the start of the file 1284 * back to the start of the file
1321 */ 1285 */
1322 scanned = 1; 1286 scanned = true;
1323 index = 0; 1287 index = 0;
1324 goto retry; 1288 goto retry;
1325 } 1289 }
1290
1326 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 1291 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1327 mapping->writeback_index = index; 1292 mapping->writeback_index = index;
1328 1293
1329 FreeXid(xid);
1330 kfree(iov);
1331 return rc; 1294 return rc;
1332} 1295}
1333 1296
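
The rewritten cifs_writepages() above no longer maps pages into a kvec and issues a synchronous CIFSSMBWrite2(): each batch of locked pages is packaged into a refcounted cifs_writedata descriptor (offset, nr_pages, sync_mode, cfile) and handed to cifs_async_writev(), re-acquiring a writable handle and retrying only while the writeback is data-integrity (WB_SYNC_ALL) and the submit returns -EAGAIN. On failure the pages are redirtied for -EAGAIN or flagged with SetPageError(); on success they are only unlocked here, so ending writeback appears to be left to the completion side of cifs_async_writev(), which is why the descriptor carries a kref. A skeleton of that retry-and-cleanup flow, with stand-in types and stubbed helpers:

    /*
     * Skeleton of the submit/retry/cleanup flow, not the real driver code:
     * all types and helpers below are illustrative stubs.
     */
    #include <errno.h>
    #include <stdio.h>

    enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

    struct wdata_sketch {
            unsigned int   nr_pages;
            long long      offset;
            enum sync_mode sync_mode;
            int            refcount;
    };

    static int  find_writable_handle(void)          { return 1; }            /* stub */
    static int  async_writev(struct wdata_sketch *w) { (void)w; return 0; }  /* stub */
    static void put_wdata(struct wdata_sketch *w)    { w->refcount--; }      /* stub */

    static int submit_batch(struct wdata_sketch *wdata, enum sync_mode mode)
    {
            int rc;

            wdata->sync_mode = mode;
            do {
                    if (!find_writable_handle()) {
                            rc = -EBADF;    /* no open handle for the inode */
                            break;
                    }
                    rc = async_writev(wdata);
            } while (mode == WB_SYNC_ALL && rc == -EAGAIN);

            if (rc != 0)
                    fputs(rc == -EAGAIN ? "redirty the pages\n"
                                        : "mark the pages in error\n", stdout);

            put_wdata(wdata);               /* drop the submitter's reference */
            return rc;
    }

    int main(void)
    {
            struct wdata_sketch w = { .nr_pages = 4, .offset = 0, .refcount = 1 };

            return submit_batch(&w, WB_SYNC_ALL);
    }
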
@@ -1383,6 +1346,14 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
1383{ 1346{
1384 int rc; 1347 int rc;
1385 struct inode *inode = mapping->host; 1348 struct inode *inode = mapping->host;
1349 struct cifsFileInfo *cfile = file->private_data;
1350 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1351 __u32 pid;
1352
1353 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1354 pid = cfile->pid;
1355 else
1356 pid = current->tgid;
1386 1357
1387 cFYI(1, "write_end for page %p from pos %lld with %d bytes", 1358 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1388 page, pos, copied); 1359 page, pos, copied);
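
The conditional just added to cifs_write_end() recurs in cifs_iovec_write(), cifs_iovec_read(), cifs_read() and cifs_readpages() below: when the CIFS_MOUNT_RWPIDFORWARD mount flag is set, the pid recorded in the open file is stamped into the request; otherwise current->tgid is used. A helper along these lines could factor the test out; cifs_get_io_pid() is hypothetical and not part of the patch, although the flag and both pid sources are taken straight from the diff:

    /*
     * Hypothetical helper, not in the patch: choose the pid to place in
     * the SMB header for an I/O request on an already-open file.
     */
    static inline __u32 cifs_get_io_pid(struct cifs_sb_info *cifs_sb,
                                        struct cifsFileInfo *cfile)
    {
            if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                    return cfile->pid;      /* forward the opener's pid */
            return current->tgid;           /* default: the caller's tgid */
    }
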
@@ -1406,8 +1377,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
1406 /* BB check if anything else missing out of ppw 1377 /* BB check if anything else missing out of ppw
1407 such as updating last write time */ 1378 such as updating last write time */
1408 page_data = kmap(page); 1379 page_data = kmap(page);
1409 rc = cifs_write(file->private_data, page_data + offset, 1380 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
1410 copied, &pos);
1411 /* if (rc < 0) should we set writebehind rc? */ 1381 /* if (rc < 0) should we set writebehind rc? */
1412 kunmap(page); 1382 kunmap(page);
1413 1383
@@ -1435,7 +1405,7 @@ int cifs_strict_fsync(struct file *file, int datasync)
1435{ 1405{
1436 int xid; 1406 int xid;
1437 int rc = 0; 1407 int rc = 0;
1438 struct cifsTconInfo *tcon; 1408 struct cifs_tcon *tcon;
1439 struct cifsFileInfo *smbfile = file->private_data; 1409 struct cifsFileInfo *smbfile = file->private_data;
1440 struct inode *inode = file->f_path.dentry->d_inode; 1410 struct inode *inode = file->f_path.dentry->d_inode;
1441 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1411 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -1465,7 +1435,7 @@ int cifs_fsync(struct file *file, int datasync)
1465{ 1435{
1466 int xid; 1436 int xid;
1467 int rc = 0; 1437 int rc = 0;
1468 struct cifsTconInfo *tcon; 1438 struct cifs_tcon *tcon;
1469 struct cifsFileInfo *smbfile = file->private_data; 1439 struct cifsFileInfo *smbfile = file->private_data;
1470 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 1440 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1471 1441
@@ -1556,9 +1526,11 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
1556 struct iov_iter it; 1526 struct iov_iter it;
1557 struct inode *inode; 1527 struct inode *inode;
1558 struct cifsFileInfo *open_file; 1528 struct cifsFileInfo *open_file;
1559 struct cifsTconInfo *pTcon; 1529 struct cifs_tcon *pTcon;
1560 struct cifs_sb_info *cifs_sb; 1530 struct cifs_sb_info *cifs_sb;
1531 struct cifs_io_parms io_parms;
1561 int xid, rc; 1532 int xid, rc;
1533 __u32 pid;
1562 1534
1563 len = iov_length(iov, nr_segs); 1535 len = iov_length(iov, nr_segs);
1564 if (!len) 1536 if (!len)
@@ -1590,6 +1562,12 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
1590 1562
1591 xid = GetXid(); 1563 xid = GetXid();
1592 open_file = file->private_data; 1564 open_file = file->private_data;
1565
1566 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1567 pid = open_file->pid;
1568 else
1569 pid = current->tgid;
1570
1593 pTcon = tlink_tcon(open_file->tlink); 1571 pTcon = tlink_tcon(open_file->tlink);
1594 inode = file->f_path.dentry->d_inode; 1572 inode = file->f_path.dentry->d_inode;
1595 1573
@@ -1616,9 +1594,13 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
1616 if (rc != 0) 1594 if (rc != 0)
1617 break; 1595 break;
1618 } 1596 }
1619 rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, 1597 io_parms.netfid = open_file->netfid;
1620 cur_len, *poffset, &written, 1598 io_parms.pid = pid;
1621 to_send, npages, 0); 1599 io_parms.tcon = pTcon;
1600 io_parms.offset = *poffset;
1601 io_parms.length = cur_len;
1602 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
1603 npages, 0);
1622 } while (rc == -EAGAIN); 1604 } while (rc == -EAGAIN);
1623 1605
1624 for (i = 0; i < npages; i++) 1606 for (i = 0; i < npages; i++)
@@ -1711,10 +1693,12 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
1711 size_t len, cur_len; 1693 size_t len, cur_len;
1712 int iov_offset = 0; 1694 int iov_offset = 0;
1713 struct cifs_sb_info *cifs_sb; 1695 struct cifs_sb_info *cifs_sb;
1714 struct cifsTconInfo *pTcon; 1696 struct cifs_tcon *pTcon;
1715 struct cifsFileInfo *open_file; 1697 struct cifsFileInfo *open_file;
1716 struct smb_com_read_rsp *pSMBr; 1698 struct smb_com_read_rsp *pSMBr;
1699 struct cifs_io_parms io_parms;
1717 char *read_data; 1700 char *read_data;
1701 __u32 pid;
1718 1702
1719 if (!nr_segs) 1703 if (!nr_segs)
1720 return 0; 1704 return 0;
@@ -1729,6 +1713,11 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
1729 open_file = file->private_data; 1713 open_file = file->private_data;
1730 pTcon = tlink_tcon(open_file->tlink); 1714 pTcon = tlink_tcon(open_file->tlink);
1731 1715
1716 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1717 pid = open_file->pid;
1718 else
1719 pid = current->tgid;
1720
1732 if ((file->f_flags & O_ACCMODE) == O_WRONLY) 1721 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1733 cFYI(1, "attempting read on write only file instance"); 1722 cFYI(1, "attempting read on write only file instance");
1734 1723
@@ -1744,8 +1733,12 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
1744 if (rc != 0) 1733 if (rc != 0)
1745 break; 1734 break;
1746 } 1735 }
1747 rc = CIFSSMBRead(xid, pTcon, open_file->netfid, 1736 io_parms.netfid = open_file->netfid;
1748 cur_len, *poffset, &bytes_read, 1737 io_parms.pid = pid;
1738 io_parms.tcon = pTcon;
1739 io_parms.offset = *poffset;
1740 io_parms.length = len;
1741 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
1749 &read_data, &buf_type); 1742 &read_data, &buf_type);
1750 pSMBr = (struct smb_com_read_rsp *)read_data; 1743 pSMBr = (struct smb_com_read_rsp *)read_data;
1751 if (read_data) { 1744 if (read_data) {
@@ -1822,11 +1815,13 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1822 unsigned int total_read; 1815 unsigned int total_read;
1823 unsigned int current_read_size; 1816 unsigned int current_read_size;
1824 struct cifs_sb_info *cifs_sb; 1817 struct cifs_sb_info *cifs_sb;
1825 struct cifsTconInfo *pTcon; 1818 struct cifs_tcon *pTcon;
1826 int xid; 1819 int xid;
1827 char *current_offset; 1820 char *current_offset;
1828 struct cifsFileInfo *open_file; 1821 struct cifsFileInfo *open_file;
1822 struct cifs_io_parms io_parms;
1829 int buf_type = CIFS_NO_BUFFER; 1823 int buf_type = CIFS_NO_BUFFER;
1824 __u32 pid;
1830 1825
1831 xid = GetXid(); 1826 xid = GetXid();
1832 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 1827 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -1839,6 +1834,11 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1839 open_file = file->private_data; 1834 open_file = file->private_data;
1840 pTcon = tlink_tcon(open_file->tlink); 1835 pTcon = tlink_tcon(open_file->tlink);
1841 1836
1837 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1838 pid = open_file->pid;
1839 else
1840 pid = current->tgid;
1841
1842 if ((file->f_flags & O_ACCMODE) == O_WRONLY) 1842 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1843 cFYI(1, "attempting read on write only file instance"); 1843 cFYI(1, "attempting read on write only file instance");
1844 1844
@@ -1861,11 +1861,13 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1861 if (rc != 0) 1861 if (rc != 0)
1862 break; 1862 break;
1863 } 1863 }
1864 rc = CIFSSMBRead(xid, pTcon, 1864 io_parms.netfid = open_file->netfid;
1865 open_file->netfid, 1865 io_parms.pid = pid;
1866 current_read_size, *poffset, 1866 io_parms.tcon = pTcon;
1867 &bytes_read, &current_offset, 1867 io_parms.offset = *poffset;
1868 &buf_type); 1868 io_parms.length = current_read_size;
1869 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
1870 &current_offset, &buf_type);
1869 } 1871 }
1870 if (rc || (bytes_read == 0)) { 1872 if (rc || (bytes_read == 0)) {
1871 if (total_read) { 1873 if (total_read) {
@@ -1996,13 +1998,15 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
1996 loff_t offset; 1998 loff_t offset;
1997 struct page *page; 1999 struct page *page;
1998 struct cifs_sb_info *cifs_sb; 2000 struct cifs_sb_info *cifs_sb;
1999 struct cifsTconInfo *pTcon; 2001 struct cifs_tcon *pTcon;
2000 unsigned int bytes_read = 0; 2002 unsigned int bytes_read = 0;
2001 unsigned int read_size, i; 2003 unsigned int read_size, i;
2002 char *smb_read_data = NULL; 2004 char *smb_read_data = NULL;
2003 struct smb_com_read_rsp *pSMBr; 2005 struct smb_com_read_rsp *pSMBr;
2004 struct cifsFileInfo *open_file; 2006 struct cifsFileInfo *open_file;
2007 struct cifs_io_parms io_parms;
2005 int buf_type = CIFS_NO_BUFFER; 2008 int buf_type = CIFS_NO_BUFFER;
2009 __u32 pid;
2006 2010
2007 xid = GetXid(); 2011 xid = GetXid();
2008 if (file->private_data == NULL) { 2012 if (file->private_data == NULL) {
@@ -2024,6 +2028,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
2024 goto read_complete; 2028 goto read_complete;
2025 2029
2026 cFYI(DBG2, "rpages: num pages %d", num_pages); 2030 cFYI(DBG2, "rpages: num pages %d", num_pages);
2031 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2032 pid = open_file->pid;
2033 else
2034 pid = current->tgid;
2035
2027 for (i = 0; i < num_pages; ) { 2036 for (i = 0; i < num_pages; ) {
2028 unsigned contig_pages; 2037 unsigned contig_pages;
2029 struct page *tmp_page; 2038 struct page *tmp_page;
@@ -2065,12 +2074,13 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
2065 if (rc != 0) 2074 if (rc != 0)
2066 break; 2075 break;
2067 } 2076 }
2068 2077 io_parms.netfid = open_file->netfid;
2069 rc = CIFSSMBRead(xid, pTcon, 2078 io_parms.pid = pid;
2070 open_file->netfid, 2079 io_parms.tcon = pTcon;
2071 read_size, offset, 2080 io_parms.offset = offset;
2072 &bytes_read, &smb_read_data, 2081 io_parms.length = read_size;
2073 &buf_type); 2082 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2083 &smb_read_data, &buf_type);
2074 /* BB more RC checks ? */ 2084 /* BB more RC checks ? */
2075 if (rc == -EAGAIN) { 2085 if (rc == -EAGAIN) {
2076 if (smb_read_data) { 2086 if (smb_read_data) {
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 297a43d0ff7f..d368a47ba5eb 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -40,7 +40,7 @@ void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
40 server->fscache = NULL; 40 server->fscache = NULL;
41} 41}
42 42
43void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) 43void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
44{ 44{
45 struct TCP_Server_Info *server = tcon->ses->server; 45 struct TCP_Server_Info *server = tcon->ses->server;
46 46
@@ -51,7 +51,7 @@ void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon)
51 server->fscache, tcon->fscache); 51 server->fscache, tcon->fscache);
52} 52}
53 53
54void cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon) 54void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
55{ 55{
56 cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache); 56 cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache);
57 fscache_relinquish_cookie(tcon->fscache, 0); 57 fscache_relinquish_cookie(tcon->fscache, 0);
@@ -62,7 +62,7 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
62{ 62{
63 struct cifsInodeInfo *cifsi = CIFS_I(inode); 63 struct cifsInodeInfo *cifsi = CIFS_I(inode);
64 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 64 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
65 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); 65 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
66 66
67 if (cifsi->fscache) 67 if (cifsi->fscache)
68 return; 68 return;
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 31b88ec2341e..63539323e0b9 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -40,8 +40,8 @@ extern void cifs_fscache_unregister(void);
40 */ 40 */
41extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *); 41extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *);
42extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *); 42extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *);
43extern void cifs_fscache_get_super_cookie(struct cifsTconInfo *); 43extern void cifs_fscache_get_super_cookie(struct cifs_tcon *);
44extern void cifs_fscache_release_super_cookie(struct cifsTconInfo *); 44extern void cifs_fscache_release_super_cookie(struct cifs_tcon *);
45 45
46extern void cifs_fscache_release_inode_cookie(struct inode *); 46extern void cifs_fscache_release_inode_cookie(struct inode *);
47extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *); 47extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *);
@@ -99,9 +99,9 @@ static inline void
99cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {} 99cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {}
100static inline void 100static inline void
101cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {} 101cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {}
102static inline void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) {} 102static inline void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) {}
103static inline void 103static inline void
104cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon) {} 104cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {}
105 105
106static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {} 106static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
107static inline void cifs_fscache_set_inode_cookie(struct inode *inode, 107static inline void cifs_fscache_set_inode_cookie(struct inode *inode,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index de02ed5e25c2..9b018c8334fa 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -295,7 +295,7 @@ int cifs_get_file_info_unix(struct file *filp)
295 struct inode *inode = filp->f_path.dentry->d_inode; 295 struct inode *inode = filp->f_path.dentry->d_inode;
296 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 296 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
297 struct cifsFileInfo *cfile = filp->private_data; 297 struct cifsFileInfo *cfile = filp->private_data;
298 struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink); 298 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
299 299
300 xid = GetXid(); 300 xid = GetXid();
301 rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data); 301 rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -318,7 +318,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
318 int rc; 318 int rc;
319 FILE_UNIX_BASIC_INFO find_data; 319 FILE_UNIX_BASIC_INFO find_data;
320 struct cifs_fattr fattr; 320 struct cifs_fattr fattr;
321 struct cifsTconInfo *tcon; 321 struct cifs_tcon *tcon;
322 struct tcon_link *tlink; 322 struct tcon_link *tlink;
323 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 323 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
324 324
@@ -373,7 +373,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
373 int oplock = 0; 373 int oplock = 0;
374 __u16 netfid; 374 __u16 netfid;
375 struct tcon_link *tlink; 375 struct tcon_link *tlink;
376 struct cifsTconInfo *tcon; 376 struct cifs_tcon *tcon;
377 struct cifs_io_parms io_parms;
377 char buf[24]; 378 char buf[24];
378 unsigned int bytes_read; 379 unsigned int bytes_read;
379 char *pbuf; 380 char *pbuf;
@@ -405,9 +406,13 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
405 if (rc == 0) { 406 if (rc == 0) {
406 int buf_type = CIFS_NO_BUFFER; 407 int buf_type = CIFS_NO_BUFFER;
407 /* Read header */ 408 /* Read header */
408 rc = CIFSSMBRead(xid, tcon, netfid, 409 io_parms.netfid = netfid;
409 24 /* length */, 0 /* offset */, 410 io_parms.pid = current->tgid;
410 &bytes_read, &pbuf, &buf_type); 411 io_parms.tcon = tcon;
412 io_parms.offset = 0;
413 io_parms.length = 24;
414 rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf,
415 &buf_type);
411 if ((rc == 0) && (bytes_read >= 8)) { 416 if ((rc == 0) && (bytes_read >= 8)) {
412 if (memcmp("IntxBLK", pbuf, 8) == 0) { 417 if (memcmp("IntxBLK", pbuf, 8) == 0) {
413 cFYI(1, "Block device"); 418 cFYI(1, "Block device");
@@ -468,7 +473,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
468 char ea_value[4]; 473 char ea_value[4];
469 __u32 mode; 474 __u32 mode;
470 struct tcon_link *tlink; 475 struct tcon_link *tlink;
471 struct cifsTconInfo *tcon; 476 struct cifs_tcon *tcon;
472 477
473 tlink = cifs_sb_tlink(cifs_sb); 478 tlink = cifs_sb_tlink(cifs_sb);
474 if (IS_ERR(tlink)) 479 if (IS_ERR(tlink))
@@ -502,7 +507,7 @@ static void
502cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, 507cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
503 struct cifs_sb_info *cifs_sb, bool adjust_tz) 508 struct cifs_sb_info *cifs_sb, bool adjust_tz)
504{ 509{
505 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); 510 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
506 511
507 memset(fattr, 0, sizeof(*fattr)); 512 memset(fattr, 0, sizeof(*fattr));
508 fattr->cf_cifsattrs = le32_to_cpu(info->Attributes); 513 fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
@@ -553,7 +558,7 @@ int cifs_get_file_info(struct file *filp)
553 struct inode *inode = filp->f_path.dentry->d_inode; 558 struct inode *inode = filp->f_path.dentry->d_inode;
554 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 559 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
555 struct cifsFileInfo *cfile = filp->private_data; 560 struct cifsFileInfo *cfile = filp->private_data;
556 struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink); 561 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
557 562
558 xid = GetXid(); 563 xid = GetXid();
559 rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data); 564 rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -590,7 +595,7 @@ int cifs_get_inode_info(struct inode **pinode,
590 struct super_block *sb, int xid, const __u16 *pfid) 595 struct super_block *sb, int xid, const __u16 *pfid)
591{ 596{
592 int rc = 0, tmprc; 597 int rc = 0, tmprc;
593 struct cifsTconInfo *pTcon; 598 struct cifs_tcon *pTcon;
594 struct tcon_link *tlink; 599 struct tcon_link *tlink;
595 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 600 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
596 char *buf = NULL; 601 char *buf = NULL;
@@ -735,10 +740,10 @@ static const struct inode_operations cifs_ipc_inode_ops = {
735 .lookup = cifs_lookup, 740 .lookup = cifs_lookup,
736}; 741};
737 742
738char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb, 743char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
739 struct cifsTconInfo *tcon) 744 struct cifs_tcon *tcon)
740{ 745{
741 int pplen = cifs_sb->prepathlen; 746 int pplen = vol->prepath ? strlen(vol->prepath) : 0;
742 int dfsplen; 747 int dfsplen;
743 char *full_path = NULL; 748 char *full_path = NULL;
744 749
@@ -772,7 +777,7 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
772 } 777 }
773 } 778 }
774 } 779 }
775 strncpy(full_path + dfsplen, cifs_sb->prepath, pplen); 780 strncpy(full_path + dfsplen, vol->prepath, pplen);
776 full_path[dfsplen + pplen] = 0; /* add trailing null */ 781 full_path[dfsplen + pplen] = 0; /* add trailing null */
777 return full_path; 782 return full_path;
778} 783}
@@ -884,19 +889,13 @@ struct inode *cifs_root_iget(struct super_block *sb)
884 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 889 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
885 struct inode *inode = NULL; 890 struct inode *inode = NULL;
886 long rc; 891 long rc;
887 char *full_path; 892 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
888 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
889
890 full_path = cifs_build_path_to_root(cifs_sb, tcon);
891 if (full_path == NULL)
892 return ERR_PTR(-ENOMEM);
893 893
894 xid = GetXid(); 894 xid = GetXid();
895 if (tcon->unix_ext) 895 if (tcon->unix_ext)
896 rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); 896 rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
897 else 897 else
898 rc = cifs_get_inode_info(&inode, full_path, NULL, sb, 898 rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
899 xid, NULL);
900 899
901 if (!inode) { 900 if (!inode) {
902 inode = ERR_PTR(rc); 901 inode = ERR_PTR(rc);
@@ -922,7 +921,6 @@ struct inode *cifs_root_iget(struct super_block *sb)
922 } 921 }
923 922
924out: 923out:
925 kfree(full_path);
926 /* can not call macro FreeXid here since in a void func 924 /* can not call macro FreeXid here since in a void func
927 * TODO: This is no longer true 925 * TODO: This is no longer true
928 */ 926 */
@@ -943,7 +941,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
943 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 941 struct cifsInodeInfo *cifsInode = CIFS_I(inode);
944 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 942 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
945 struct tcon_link *tlink = NULL; 943 struct tcon_link *tlink = NULL;
946 struct cifsTconInfo *pTcon; 944 struct cifs_tcon *pTcon;
947 FILE_BASIC_INFO info_buf; 945 FILE_BASIC_INFO info_buf;
948 946
949 if (attrs == NULL) 947 if (attrs == NULL)
@@ -1061,7 +1059,7 @@ cifs_rename_pending_delete(char *full_path, struct dentry *dentry, int xid)
1061 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 1059 struct cifsInodeInfo *cifsInode = CIFS_I(inode);
1062 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1060 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1063 struct tcon_link *tlink; 1061 struct tcon_link *tlink;
1064 struct cifsTconInfo *tcon; 1062 struct cifs_tcon *tcon;
1065 __u32 dosattr, origattr; 1063 __u32 dosattr, origattr;
1066 FILE_BASIC_INFO *info_buf = NULL; 1064 FILE_BASIC_INFO *info_buf = NULL;
1067 1065
@@ -1179,7 +1177,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
1179 struct super_block *sb = dir->i_sb; 1177 struct super_block *sb = dir->i_sb;
1180 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 1178 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
1181 struct tcon_link *tlink; 1179 struct tcon_link *tlink;
1182 struct cifsTconInfo *tcon; 1180 struct cifs_tcon *tcon;
1183 struct iattr *attrs = NULL; 1181 struct iattr *attrs = NULL;
1184 __u32 dosattr = 0, origattr = 0; 1182 __u32 dosattr = 0, origattr = 0;
1185 1183
@@ -1277,7 +1275,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
1277 int xid; 1275 int xid;
1278 struct cifs_sb_info *cifs_sb; 1276 struct cifs_sb_info *cifs_sb;
1279 struct tcon_link *tlink; 1277 struct tcon_link *tlink;
1280 struct cifsTconInfo *pTcon; 1278 struct cifs_tcon *pTcon;
1281 char *full_path = NULL; 1279 char *full_path = NULL;
1282 struct inode *newinode = NULL; 1280 struct inode *newinode = NULL;
1283 struct cifs_fattr fattr; 1281 struct cifs_fattr fattr;
@@ -1455,7 +1453,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
1455 int xid; 1453 int xid;
1456 struct cifs_sb_info *cifs_sb; 1454 struct cifs_sb_info *cifs_sb;
1457 struct tcon_link *tlink; 1455 struct tcon_link *tlink;
1458 struct cifsTconInfo *pTcon; 1456 struct cifs_tcon *pTcon;
1459 char *full_path = NULL; 1457 char *full_path = NULL;
1460 struct cifsInodeInfo *cifsInode; 1458 struct cifsInodeInfo *cifsInode;
1461 1459
@@ -1512,7 +1510,7 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
1512{ 1510{
1513 struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb); 1511 struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
1514 struct tcon_link *tlink; 1512 struct tcon_link *tlink;
1515 struct cifsTconInfo *pTcon; 1513 struct cifs_tcon *pTcon;
1516 __u16 srcfid; 1514 __u16 srcfid;
1517 int oplock, rc; 1515 int oplock, rc;
1518 1516
@@ -1564,7 +1562,7 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
1564 char *toName = NULL; 1562 char *toName = NULL;
1565 struct cifs_sb_info *cifs_sb; 1563 struct cifs_sb_info *cifs_sb;
1566 struct tcon_link *tlink; 1564 struct tcon_link *tlink;
1567 struct cifsTconInfo *tcon; 1565 struct cifs_tcon *tcon;
1568 FILE_UNIX_BASIC_INFO *info_buf_source = NULL; 1566 FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
1569 FILE_UNIX_BASIC_INFO *info_buf_target; 1567 FILE_UNIX_BASIC_INFO *info_buf_target;
1570 int xid, rc, tmprc; 1568 int xid, rc, tmprc;
@@ -1794,7 +1792,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1794 struct kstat *stat) 1792 struct kstat *stat)
1795{ 1793{
1796 struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb); 1794 struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
1797 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); 1795 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
1798 struct inode *inode = dentry->d_inode; 1796 struct inode *inode = dentry->d_inode;
1799 int rc; 1797 int rc;
1800 1798
@@ -1872,7 +1870,8 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
1872 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 1870 struct cifsInodeInfo *cifsInode = CIFS_I(inode);
1873 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1871 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1874 struct tcon_link *tlink = NULL; 1872 struct tcon_link *tlink = NULL;
1875 struct cifsTconInfo *pTcon = NULL; 1873 struct cifs_tcon *pTcon = NULL;
1874 struct cifs_io_parms io_parms;
1876 1875
1877 /* 1876 /*
1878 * To avoid spurious oplock breaks from server, in the case of 1877 * To avoid spurious oplock breaks from server, in the case of
@@ -1894,8 +1893,14 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
1894 cFYI(1, "SetFSize for attrs rc = %d", rc); 1893 cFYI(1, "SetFSize for attrs rc = %d", rc);
1895 if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { 1894 if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1896 unsigned int bytes_written; 1895 unsigned int bytes_written;
1897 rc = CIFSSMBWrite(xid, pTcon, nfid, 0, attrs->ia_size, 1896
1898 &bytes_written, NULL, NULL, 1); 1897 io_parms.netfid = nfid;
1898 io_parms.pid = npid;
1899 io_parms.tcon = pTcon;
1900 io_parms.offset = 0;
1901 io_parms.length = attrs->ia_size;
1902 rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
1903 NULL, NULL, 1);
1899 cFYI(1, "Wrt seteof rc %d", rc); 1904 cFYI(1, "Wrt seteof rc %d", rc);
1900 } 1905 }
1901 } else 1906 } else
@@ -1930,10 +1935,15 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
1930 CIFS_MOUNT_MAP_SPECIAL_CHR); 1935 CIFS_MOUNT_MAP_SPECIAL_CHR);
1931 if (rc == 0) { 1936 if (rc == 0) {
1932 unsigned int bytes_written; 1937 unsigned int bytes_written;
1933 rc = CIFSSMBWrite(xid, pTcon, netfid, 0, 1938
1934 attrs->ia_size, 1939 io_parms.netfid = netfid;
1935 &bytes_written, NULL, 1940 io_parms.pid = current->tgid;
1936 NULL, 1); 1941 io_parms.tcon = pTcon;
1942 io_parms.offset = 0;
1943 io_parms.length = attrs->ia_size;
1944 rc = CIFSSMBWrite(xid, &io_parms,
1945 &bytes_written,
1946 NULL, NULL, 1);
1937 cFYI(1, "wrt seteof rc %d", rc); 1947 cFYI(1, "wrt seteof rc %d", rc);
1938 CIFSSMBClose(xid, pTcon, netfid); 1948 CIFSSMBClose(xid, pTcon, netfid);
1939 } 1949 }
@@ -1961,7 +1971,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
1961 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 1971 struct cifsInodeInfo *cifsInode = CIFS_I(inode);
1962 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1972 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1963 struct tcon_link *tlink; 1973 struct tcon_link *tlink;
1964 struct cifsTconInfo *pTcon; 1974 struct cifs_tcon *pTcon;
1965 struct cifs_unix_set_info_args *args = NULL; 1975 struct cifs_unix_set_info_args *args = NULL;
1966 struct cifsFileInfo *open_file; 1976 struct cifsFileInfo *open_file;
1967 1977
@@ -2247,7 +2257,7 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
2247{ 2257{
2248 struct inode *inode = direntry->d_inode; 2258 struct inode *inode = direntry->d_inode;
2249 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2259 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2250 struct cifsTconInfo *pTcon = cifs_sb_master_tcon(cifs_sb); 2260 struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
2251 2261
2252 if (pTcon->unix_ext) 2262 if (pTcon->unix_ext)
2253 return cifs_setattr_unix(direntry, attrs); 2263 return cifs_setattr_unix(direntry, attrs);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 0c98672d0122..4221b5e48a42 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -38,7 +38,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
38 struct cifs_sb_info *cifs_sb; 38 struct cifs_sb_info *cifs_sb;
39#ifdef CONFIG_CIFS_POSIX 39#ifdef CONFIG_CIFS_POSIX
40 struct cifsFileInfo *pSMBFile = filep->private_data; 40 struct cifsFileInfo *pSMBFile = filep->private_data;
41 struct cifsTconInfo *tcon; 41 struct cifs_tcon *tcon;
42 __u64 ExtAttrBits = 0; 42 __u64 ExtAttrBits = 0;
43 __u64 ExtAttrMask = 0; 43 __u64 ExtAttrMask = 0;
44 __u64 caps; 44 __u64 caps;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index ce417a9764a3..556b1a0b54de 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -175,7 +175,7 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
175} 175}
176 176
177static int 177static int
178CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon, 178CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
179 const char *fromName, const char *toName, 179 const char *fromName, const char *toName,
180 const struct nls_table *nls_codepage, int remap) 180 const struct nls_table *nls_codepage, int remap)
181{ 181{
@@ -184,6 +184,7 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
184 __u16 netfid = 0; 184 __u16 netfid = 0;
185 u8 *buf; 185 u8 *buf;
186 unsigned int bytes_written = 0; 186 unsigned int bytes_written = 0;
187 struct cifs_io_parms io_parms;
187 188
188 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); 189 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
189 if (!buf) 190 if (!buf)
@@ -203,10 +204,13 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
203 return rc; 204 return rc;
204 } 205 }
205 206
206 rc = CIFSSMBWrite(xid, tcon, netfid, 207 io_parms.netfid = netfid;
207 CIFS_MF_SYMLINK_FILE_SIZE /* length */, 208 io_parms.pid = current->tgid;
208 0 /* offset */, 209 io_parms.tcon = tcon;
209 &bytes_written, buf, NULL, 0); 210 io_parms.offset = 0;
211 io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
212
213 rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, buf, NULL, 0);
210 CIFSSMBClose(xid, tcon, netfid); 214 CIFSSMBClose(xid, tcon, netfid);
211 kfree(buf); 215 kfree(buf);
212 if (rc != 0) 216 if (rc != 0)
@@ -219,7 +223,7 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
219} 223}
220 224
221static int 225static int
222CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon, 226CIFSQueryMFSymLink(const int xid, struct cifs_tcon *tcon,
223 const unsigned char *searchName, char **symlinkinfo, 227 const unsigned char *searchName, char **symlinkinfo,
224 const struct nls_table *nls_codepage, int remap) 228 const struct nls_table *nls_codepage, int remap)
225{ 229{
@@ -231,6 +235,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
231 unsigned int bytes_read = 0; 235 unsigned int bytes_read = 0;
232 int buf_type = CIFS_NO_BUFFER; 236 int buf_type = CIFS_NO_BUFFER;
233 unsigned int link_len = 0; 237 unsigned int link_len = 0;
238 struct cifs_io_parms io_parms;
234 FILE_ALL_INFO file_info; 239 FILE_ALL_INFO file_info;
235 240
236 rc = CIFSSMBOpen(xid, tcon, searchName, FILE_OPEN, GENERIC_READ, 241 rc = CIFSSMBOpen(xid, tcon, searchName, FILE_OPEN, GENERIC_READ,
@@ -249,11 +254,13 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
249 if (!buf) 254 if (!buf)
250 return -ENOMEM; 255 return -ENOMEM;
251 pbuf = buf; 256 pbuf = buf;
257 io_parms.netfid = netfid;
258 io_parms.pid = current->tgid;
259 io_parms.tcon = tcon;
260 io_parms.offset = 0;
261 io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
252 262
253 rc = CIFSSMBRead(xid, tcon, netfid, 263 rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
254 CIFS_MF_SYMLINK_FILE_SIZE /* length */,
255 0 /* offset */,
256 &bytes_read, &pbuf, &buf_type);
257 CIFSSMBClose(xid, tcon, netfid); 264 CIFSSMBClose(xid, tcon, netfid);
258 if (rc != 0) { 265 if (rc != 0) {
259 kfree(buf); 266 kfree(buf);
@@ -291,7 +298,8 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
291 int oplock = 0; 298 int oplock = 0;
292 __u16 netfid = 0; 299 __u16 netfid = 0;
293 struct tcon_link *tlink; 300 struct tcon_link *tlink;
294 struct cifsTconInfo *pTcon; 301 struct cifs_tcon *pTcon;
302 struct cifs_io_parms io_parms;
295 u8 *buf; 303 u8 *buf;
296 char *pbuf; 304 char *pbuf;
297 unsigned int bytes_read = 0; 305 unsigned int bytes_read = 0;
@@ -328,11 +336,13 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
328 goto out; 336 goto out;
329 } 337 }
330 pbuf = buf; 338 pbuf = buf;
339 io_parms.netfid = netfid;
340 io_parms.pid = current->tgid;
341 io_parms.tcon = pTcon;
342 io_parms.offset = 0;
343 io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
331 344
332 rc = CIFSSMBRead(xid, pTcon, netfid, 345 rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
333 CIFS_MF_SYMLINK_FILE_SIZE /* length */,
334 0 /* offset */,
335 &bytes_read, &pbuf, &buf_type);
336 CIFSSMBClose(xid, pTcon, netfid); 346 CIFSSMBClose(xid, pTcon, netfid);
337 if (rc != 0) { 347 if (rc != 0) {
338 kfree(buf); 348 kfree(buf);
@@ -370,7 +380,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
370 char *toName = NULL; 380 char *toName = NULL;
371 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 381 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
372 struct tcon_link *tlink; 382 struct tcon_link *tlink;
373 struct cifsTconInfo *pTcon; 383 struct cifs_tcon *pTcon;
374 struct cifsInodeInfo *cifsInode; 384 struct cifsInodeInfo *cifsInode;
375 385
376 tlink = cifs_sb_tlink(cifs_sb); 386 tlink = cifs_sb_tlink(cifs_sb);
@@ -445,7 +455,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
445 char *target_path = NULL; 455 char *target_path = NULL;
446 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 456 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
447 struct tcon_link *tlink = NULL; 457 struct tcon_link *tlink = NULL;
448 struct cifsTconInfo *tcon; 458 struct cifs_tcon *tcon;
449 459
450 xid = GetXid(); 460 xid = GetXid();
451 461
@@ -518,7 +528,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
518 int xid; 528 int xid;
519 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 529 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
520 struct tcon_link *tlink; 530 struct tcon_link *tlink;
521 struct cifsTconInfo *pTcon; 531 struct cifs_tcon *pTcon;
522 char *full_path = NULL; 532 char *full_path = NULL;
523 struct inode *newinode = NULL; 533 struct inode *newinode = NULL;
524 534
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 907531ac5888..03a1f491d39b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -67,12 +67,12 @@ _FreeXid(unsigned int xid)
67 spin_unlock(&GlobalMid_Lock); 67 spin_unlock(&GlobalMid_Lock);
68} 68}
69 69
70struct cifsSesInfo * 70struct cifs_ses *
71sesInfoAlloc(void) 71sesInfoAlloc(void)
72{ 72{
73 struct cifsSesInfo *ret_buf; 73 struct cifs_ses *ret_buf;
74 74
75 ret_buf = kzalloc(sizeof(struct cifsSesInfo), GFP_KERNEL); 75 ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
76 if (ret_buf) { 76 if (ret_buf) {
77 atomic_inc(&sesInfoAllocCount); 77 atomic_inc(&sesInfoAllocCount);
78 ret_buf->status = CifsNew; 78 ret_buf->status = CifsNew;
@@ -85,7 +85,7 @@ sesInfoAlloc(void)
85} 85}
86 86
87void 87void
88sesInfoFree(struct cifsSesInfo *buf_to_free) 88sesInfoFree(struct cifs_ses *buf_to_free)
89{ 89{
90 if (buf_to_free == NULL) { 90 if (buf_to_free == NULL) {
91 cFYI(1, "Null buffer passed to sesInfoFree"); 91 cFYI(1, "Null buffer passed to sesInfoFree");
@@ -105,11 +105,11 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
105 kfree(buf_to_free); 105 kfree(buf_to_free);
106} 106}
107 107
108struct cifsTconInfo * 108struct cifs_tcon *
109tconInfoAlloc(void) 109tconInfoAlloc(void)
110{ 110{
111 struct cifsTconInfo *ret_buf; 111 struct cifs_tcon *ret_buf;
112 ret_buf = kzalloc(sizeof(struct cifsTconInfo), GFP_KERNEL); 112 ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
113 if (ret_buf) { 113 if (ret_buf) {
114 atomic_inc(&tconInfoAllocCount); 114 atomic_inc(&tconInfoAllocCount);
115 ret_buf->tidStatus = CifsNew; 115 ret_buf->tidStatus = CifsNew;
@@ -124,7 +124,7 @@ tconInfoAlloc(void)
124} 124}
125 125
126void 126void
127tconInfoFree(struct cifsTconInfo *buf_to_free) 127tconInfoFree(struct cifs_tcon *buf_to_free)
128{ 128{
129 if (buf_to_free == NULL) { 129 if (buf_to_free == NULL) {
130 cFYI(1, "Null buffer passed to tconInfoFree"); 130 cFYI(1, "Null buffer passed to tconInfoFree");
@@ -295,11 +295,11 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
295 case it is responsbility of caller to set the mid */ 295 case it is responsbility of caller to set the mid */
296void 296void
297header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , 297header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
298 const struct cifsTconInfo *treeCon, int word_count 298 const struct cifs_tcon *treeCon, int word_count
299 /* length of fixed section (word count) in two byte units */) 299 /* length of fixed section (word count) in two byte units */)
300{ 300{
301 struct list_head *temp_item; 301 struct list_head *temp_item;
302 struct cifsSesInfo *ses; 302 struct cifs_ses *ses;
303 char *temp = (char *) buffer; 303 char *temp = (char *) buffer;
304 304
305 memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */ 305 memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
@@ -359,7 +359,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
359 "did not match tcon uid"); 359 "did not match tcon uid");
360 spin_lock(&cifs_tcp_ses_lock); 360 spin_lock(&cifs_tcp_ses_lock);
361 list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) { 361 list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
362 ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list); 362 ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
363 if (ses->linux_uid == current_fsuid()) { 363 if (ses->linux_uid == current_fsuid()) {
364 if (ses->server == treeCon->ses->server) { 364 if (ses->server == treeCon->ses->server) {
365 cFYI(1, "found matching uid substitute right smb_uid"); 365 cFYI(1, "found matching uid substitute right smb_uid");
@@ -380,7 +380,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
380 if (treeCon->nocase) 380 if (treeCon->nocase)
381 buffer->Flags |= SMBFLG_CASELESS; 381 buffer->Flags |= SMBFLG_CASELESS;
382 if ((treeCon->ses) && (treeCon->ses->server)) 382 if ((treeCon->ses) && (treeCon->ses->server))
383 if (treeCon->ses->server->secMode & 383 if (treeCon->ses->server->sec_mode &
384 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 384 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
385 buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 385 buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
386 } 386 }
@@ -507,8 +507,8 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
507{ 507{
508 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; 508 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
509 struct list_head *tmp, *tmp1, *tmp2; 509 struct list_head *tmp, *tmp1, *tmp2;
510 struct cifsSesInfo *ses; 510 struct cifs_ses *ses;
511 struct cifsTconInfo *tcon; 511 struct cifs_tcon *tcon;
512 struct cifsInodeInfo *pCifsInode; 512 struct cifsInodeInfo *pCifsInode;
513 struct cifsFileInfo *netfile; 513 struct cifsFileInfo *netfile;
514 514
@@ -566,9 +566,9 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
566 /* look up tcon based on tid & uid */ 566 /* look up tcon based on tid & uid */
567 spin_lock(&cifs_tcp_ses_lock); 567 spin_lock(&cifs_tcp_ses_lock);
568 list_for_each(tmp, &srv->smb_ses_list) { 568 list_for_each(tmp, &srv->smb_ses_list) {
569 ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); 569 ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
570 list_for_each(tmp1, &ses->tcon_list) { 570 list_for_each(tmp1, &ses->tcon_list) {
571 tcon = list_entry(tmp1, struct cifsTconInfo, tcon_list); 571 tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
572 if (tcon->tid != buf->Tid) 572 if (tcon->tid != buf->Tid)
573 continue; 573 continue;
574 574
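
The hunk above only renames the types, but it shows what is_valid_oplock_break() does under cifs_tcp_ses_lock: walk every cifs_ses on the server, then every cifs_tcon on that session, and stop at the tree connection whose tid matches the Tid of the incoming break. A standalone sketch of that two-level lookup, with arrays standing in for the kernel's linked lists:

    /*
     * Two-level lookup sketch: sessions on a server, tree connections on
     * a session, matched by tid.  All names here are illustrative.
     */
    #include <stdio.h>

    struct tcon_sketch { unsigned short tid; };
    struct ses_sketch  { struct tcon_sketch tcons[2]; int ntcon; };

    static struct tcon_sketch *find_tcon(struct ses_sketch *sessions, int nses,
                                         unsigned short tid)
    {
            for (int s = 0; s < nses; s++) {
                    for (int t = 0; t < sessions[s].ntcon; t++) {
                            if (sessions[s].tcons[t].tid != tid)
                                    continue;       /* mirrors the test in the diff */
                            return &sessions[s].tcons[t];
                    }
            }
            return NULL;
    }

    int main(void)
    {
            struct ses_sketch sessions[2] = {
                    { .tcons = { { .tid = 5 }, { .tid = 9 } }, .ntcon = 2 },
                    { .tcons = { { .tid = 7 } },               .ntcon = 1 },
            };

            printf("tid 7 %sfound\n", find_tcon(sessions, 2, 7) ? "" : "not ");
            return 0;
    }
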
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 79b71c2c7c9d..73e47e84b61a 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -836,7 +836,7 @@ ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode)
836} 836}
837 837
838int 838int
839map_smb_to_linux_error(struct smb_hdr *smb, int logErr) 839map_smb_to_linux_error(struct smb_hdr *smb, bool logErr)
840{ 840{
841 unsigned int i; 841 unsigned int i;
842 int rc = -EIO; /* if transport error smb error may not be set */ 842 int rc = -EIO; /* if transport error smb error may not be set */
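
map_smb_to_linux_error() now takes a bool for its logging argument instead of an int. A one-line illustrative wrapper shows how a caller holding a flags word passes it after the change; the wrapper itself is not part of the patch.

/* Illustrative wrapper: turn a flags test into the explicit bool the
 * new prototype expects. */
static int check_smb_status(struct smb_hdr *resp, int flags)
{
	return map_smb_to_linux_error(resp, (flags & CIFS_LOG_ERROR) != 0);
}
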
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f8e4cd2a7912..6751e745bbc6 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -195,7 +195,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
195 int len; 195 int len;
196 int oplock = 0; 196 int oplock = 0;
197 int rc; 197 int rc;
198 struct cifsTconInfo *ptcon = cifs_sb_tcon(cifs_sb); 198 struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb);
199 char *tmpbuffer; 199 char *tmpbuffer;
200 200
201 rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ, 201 rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ,
@@ -223,7 +223,7 @@ static int initiate_cifs_search(const int xid, struct file *file)
223 struct cifsFileInfo *cifsFile; 223 struct cifsFileInfo *cifsFile;
224 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 224 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
225 struct tcon_link *tlink = NULL; 225 struct tcon_link *tlink = NULL;
226 struct cifsTconInfo *pTcon; 226 struct cifs_tcon *pTcon;
227 227
228 if (file->private_data == NULL) { 228 if (file->private_data == NULL) {
229 tlink = cifs_sb_tlink(cifs_sb); 229 tlink = cifs_sb_tlink(cifs_sb);
@@ -496,7 +496,7 @@ static int cifs_save_resume_key(const char *current_entry,
496 assume that they are located in the findfirst return buffer.*/ 496 assume that they are located in the findfirst return buffer.*/
497/* We start counting in the buffer with entry 2 and increment for every 497/* We start counting in the buffer with entry 2 and increment for every
498 entry (do not increment for . or .. entry) */ 498 entry (do not increment for . or .. entry) */
499static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, 499static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
500 struct file *file, char **ppCurrentEntry, int *num_to_ret) 500 struct file *file, char **ppCurrentEntry, int *num_to_ret)
501{ 501{
502 int rc = 0; 502 int rc = 0;
@@ -764,7 +764,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
764{ 764{
765 int rc = 0; 765 int rc = 0;
766 int xid, i; 766 int xid, i;
767 struct cifsTconInfo *pTcon; 767 struct cifs_tcon *pTcon;
768 struct cifsFileInfo *cifsFile = NULL; 768 struct cifsFileInfo *cifsFile = NULL;
769 char *current_entry; 769 char *current_entry;
770 int num_to_fill = 0; 770 int num_to_fill = 0;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7dd462100378..3892ab817a36 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -37,13 +37,13 @@
37 * the socket has been reestablished (so we know whether to use vc 0). 37 * the socket has been reestablished (so we know whether to use vc 0).
38 * Called while holding the cifs_tcp_ses_lock, so do not block 38 * Called while holding the cifs_tcp_ses_lock, so do not block
39 */ 39 */
40static bool is_first_ses_reconnect(struct cifsSesInfo *ses) 40static bool is_first_ses_reconnect(struct cifs_ses *ses)
41{ 41{
42 struct list_head *tmp; 42 struct list_head *tmp;
43 struct cifsSesInfo *tmp_ses; 43 struct cifs_ses *tmp_ses;
44 44
45 list_for_each(tmp, &ses->server->smb_ses_list) { 45 list_for_each(tmp, &ses->server->smb_ses_list) {
46 tmp_ses = list_entry(tmp, struct cifsSesInfo, 46 tmp_ses = list_entry(tmp, struct cifs_ses,
47 smb_ses_list); 47 smb_ses_list);
48 if (tmp_ses->need_reconnect == false) 48 if (tmp_ses->need_reconnect == false)
49 return false; 49 return false;
@@ -61,11 +61,11 @@ static bool is_first_ses_reconnect(struct cifsSesInfo *ses)
61 * any vc but zero (some servers reset the connection on vcnum zero) 61 * any vc but zero (some servers reset the connection on vcnum zero)
62 * 62 *
63 */ 63 */
64static __le16 get_next_vcnum(struct cifsSesInfo *ses) 64static __le16 get_next_vcnum(struct cifs_ses *ses)
65{ 65{
66 __u16 vcnum = 0; 66 __u16 vcnum = 0;
67 struct list_head *tmp; 67 struct list_head *tmp;
68 struct cifsSesInfo *tmp_ses; 68 struct cifs_ses *tmp_ses;
69 __u16 max_vcs = ses->server->max_vcs; 69 __u16 max_vcs = ses->server->max_vcs;
70 __u16 i; 70 __u16 i;
71 int free_vc_found = 0; 71 int free_vc_found = 0;
@@ -87,7 +87,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses)
87 free_vc_found = 1; 87 free_vc_found = 1;
88 88
89 list_for_each(tmp, &ses->server->smb_ses_list) { 89 list_for_each(tmp, &ses->server->smb_ses_list) {
90 tmp_ses = list_entry(tmp, struct cifsSesInfo, 90 tmp_ses = list_entry(tmp, struct cifs_ses,
91 smb_ses_list); 91 smb_ses_list);
92 if (tmp_ses->vcnum == i) { 92 if (tmp_ses->vcnum == i) {
93 free_vc_found = 0; 93 free_vc_found = 0;
@@ -114,7 +114,7 @@ get_vc_num_exit:
114 return cpu_to_le16(vcnum); 114 return cpu_to_le16(vcnum);
115} 115}
116 116
117static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB) 117static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
118{ 118{
119 __u32 capabilities = 0; 119 __u32 capabilities = 0;
120 120
@@ -136,7 +136,7 @@ static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
136 capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS | 136 capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
137 CAP_LARGE_WRITE_X | CAP_LARGE_READ_X; 137 CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
138 138
139 if (ses->server->secMode & 139 if (ses->server->sec_mode &
140 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 140 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
141 pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 141 pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
142 142
@@ -181,7 +181,7 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
181 *pbcc_area = bcc_ptr; 181 *pbcc_area = bcc_ptr;
182} 182}
183 183
184static void unicode_domain_string(char **pbcc_area, struct cifsSesInfo *ses, 184static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
185 const struct nls_table *nls_cp) 185 const struct nls_table *nls_cp)
186{ 186{
187 char *bcc_ptr = *pbcc_area; 187 char *bcc_ptr = *pbcc_area;
@@ -204,7 +204,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifsSesInfo *ses,
204} 204}
205 205
206 206
207static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, 207static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
208 const struct nls_table *nls_cp) 208 const struct nls_table *nls_cp)
209{ 209{
210 char *bcc_ptr = *pbcc_area; 210 char *bcc_ptr = *pbcc_area;
@@ -236,7 +236,7 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
236 *pbcc_area = bcc_ptr; 236 *pbcc_area = bcc_ptr;
237} 237}
238 238
239static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, 239static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
240 const struct nls_table *nls_cp) 240 const struct nls_table *nls_cp)
241{ 241{
242 char *bcc_ptr = *pbcc_area; 242 char *bcc_ptr = *pbcc_area;
@@ -276,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
276} 276}
277 277
278static void 278static void
279decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, 279decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
280 const struct nls_table *nls_cp) 280 const struct nls_table *nls_cp)
281{ 281{
282 int len; 282 int len;
@@ -310,7 +310,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
310} 310}
311 311
312static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft, 312static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
313 struct cifsSesInfo *ses, 313 struct cifs_ses *ses,
314 const struct nls_table *nls_cp) 314 const struct nls_table *nls_cp)
315{ 315{
316 int rc = 0; 316 int rc = 0;
@@ -364,7 +364,7 @@ static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
364} 364}
365 365
366static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, 366static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
367 struct cifsSesInfo *ses) 367 struct cifs_ses *ses)
368{ 368{
369 unsigned int tioffset; /* challenge message target info area */ 369 unsigned int tioffset; /* challenge message target info area */
370 unsigned int tilen; /* challenge message target info area length */ 370 unsigned int tilen; /* challenge message target info area length */
@@ -411,7 +411,7 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
411/* We do not malloc the blob, it is passed in pbuffer, because 411/* We do not malloc the blob, it is passed in pbuffer, because
412 it is fixed size, and small, making this approach cleaner */ 412 it is fixed size, and small, making this approach cleaner */
413static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, 413static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
414 struct cifsSesInfo *ses) 414 struct cifs_ses *ses)
415{ 415{
416 NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer; 416 NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer;
417 __u32 flags; 417 __u32 flags;
@@ -424,7 +424,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
424 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | 424 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
425 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | 425 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
426 NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; 426 NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
427 if (ses->server->secMode & 427 if (ses->server->sec_mode &
428 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { 428 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
429 flags |= NTLMSSP_NEGOTIATE_SIGN; 429 flags |= NTLMSSP_NEGOTIATE_SIGN;
430 if (!ses->server->session_estab) 430 if (!ses->server->session_estab)
@@ -449,7 +449,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
449 This function returns the length of the data in the blob */ 449 This function returns the length of the data in the blob */
450static int build_ntlmssp_auth_blob(unsigned char *pbuffer, 450static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
451 u16 *buflen, 451 u16 *buflen,
452 struct cifsSesInfo *ses, 452 struct cifs_ses *ses,
453 const struct nls_table *nls_cp) 453 const struct nls_table *nls_cp)
454{ 454{
455 int rc; 455 int rc;
@@ -464,10 +464,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
464 NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | 464 NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
465 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | 465 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
466 NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; 466 NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
467 if (ses->server->secMode & 467 if (ses->server->sec_mode &
468 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 468 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
469 flags |= NTLMSSP_NEGOTIATE_SIGN; 469 flags |= NTLMSSP_NEGOTIATE_SIGN;
470 if (ses->server->secMode & SECMODE_SIGN_REQUIRED) 470 if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
471 flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; 471 flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
472 472
473 tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE); 473 tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
@@ -551,7 +551,7 @@ setup_ntlmv2_ret:
551} 551}
552 552
553int 553int
554CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, 554CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
555 const struct nls_table *nls_cp) 555 const struct nls_table *nls_cp)
556{ 556{
557 int rc = 0; 557 int rc = 0;
@@ -657,7 +657,7 @@ ssetup_ntlmssp_authenticate:
657 */ 657 */
658 658
659 rc = calc_lanman_hash(ses->password, ses->server->cryptkey, 659 rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
660 ses->server->secMode & SECMODE_PW_ENCRYPT ? 660 ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
661 true : false, lnm_session_key); 661 true : false, lnm_session_key);
662 662
663 ses->flags |= CIFS_SES_LANMAN; 663 ses->flags |= CIFS_SES_LANMAN;
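
The sess.c changes are the same structure rename plus the secMode to sec_mode switch in every signing check; the test itself is unchanged. A small helper form of it is sketched below for reference: the helper is illustrative, while the sec_mode field and the SECMODE_* flags are taken from the hunks above.

/* Illustrative helper, not part of the patch: the signing test used in
 * cifs_ssetup_hdr(), build_ntlmssp_negotiate_blob() and friends. */
static bool server_needs_signing(struct TCP_Server_Info *server)
{
	return (server->sec_mode &
		(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) != 0;
}
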
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index f2513fb8c391..147aa22c3c3a 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -295,7 +295,7 @@ static int wait_for_free_request(struct TCP_Server_Info *server,
295 return 0; 295 return 0;
296} 296}
297 297
298static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf, 298static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
299 struct mid_q_entry **ppmidQ) 299 struct mid_q_entry **ppmidQ)
300{ 300{
301 if (ses->server->tcpStatus == CifsExiting) { 301 if (ses->server->tcpStatus == CifsExiting) {
@@ -342,22 +342,24 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
342 * the result. Caller is responsible for dealing with timeouts. 342 * the result. Caller is responsible for dealing with timeouts.
343 */ 343 */
344int 344int
345cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, 345cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
346 mid_callback_t *callback, void *cbdata) 346 unsigned int nvec, mid_callback_t *callback, void *cbdata,
347 bool ignore_pend)
347{ 348{
348 int rc; 349 int rc;
349 struct mid_q_entry *mid; 350 struct mid_q_entry *mid;
351 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
350 352
351 rc = wait_for_free_request(server, CIFS_ASYNC_OP); 353 rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
352 if (rc) 354 if (rc)
353 return rc; 355 return rc;
354 356
355 /* enable signing if server requires it */ 357 /* enable signing if server requires it */
356 if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 358 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
357 in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 359 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
358 360
359 mutex_lock(&server->srv_mutex); 361 mutex_lock(&server->srv_mutex);
360 mid = AllocMidQEntry(in_buf, server); 362 mid = AllocMidQEntry(hdr, server);
361 if (mid == NULL) { 363 if (mid == NULL) {
362 mutex_unlock(&server->srv_mutex); 364 mutex_unlock(&server->srv_mutex);
363 return -ENOMEM; 365 return -ENOMEM;
@@ -368,7 +370,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
368 list_add_tail(&mid->qhead, &server->pending_mid_q); 370 list_add_tail(&mid->qhead, &server->pending_mid_q);
369 spin_unlock(&GlobalMid_Lock); 371 spin_unlock(&GlobalMid_Lock);
370 372
371 rc = cifs_sign_smb(in_buf, server, &mid->sequence_number); 373 rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
372 if (rc) { 374 if (rc) {
373 mutex_unlock(&server->srv_mutex); 375 mutex_unlock(&server->srv_mutex);
374 goto out_err; 376 goto out_err;
@@ -380,7 +382,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
380#ifdef CONFIG_CIFS_STATS2 382#ifdef CONFIG_CIFS_STATS2
381 atomic_inc(&server->inSend); 383 atomic_inc(&server->inSend);
382#endif 384#endif
383 rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); 385 rc = smb_sendv(server, iov, nvec);
384#ifdef CONFIG_CIFS_STATS2 386#ifdef CONFIG_CIFS_STATS2
385 atomic_dec(&server->inSend); 387 atomic_dec(&server->inSend);
386 mid->when_sent = jiffies; 388 mid->when_sent = jiffies;
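
cifs_call_async() switches from a single smb_hdr to a kvec array: the caller passes the iovec, the vector count, a completion callback with its private data, and an ignore_pend flag that replaces the hard-coded CIFS_ASYNC_OP; signing moves to cifs_sign_smb2() and the send to smb_sendv(). A hedged sketch of a caller against the new prototype follows; the callback and wrapper names are illustrative and the CIFS internal headers (cifsglob.h, cifsproto.h) are assumed.

/* Illustrative completion callback; real callbacks release the mid and
 * wake up or complete the waiter. */
static void my_callback(struct mid_q_entry *mid)
{
	/* completion work would go here */
}

static int send_async_request(struct TCP_Server_Info *server,
			      struct smb_hdr *req, void *cbdata)
{
	struct kvec iov;

	/* smb_buf_length counts the bytes after the 4-byte RFC1002 header */
	iov.iov_base = req;
	iov.iov_len = be32_to_cpu(req->smb_buf_length) + 4;

	/* ignore_pend = true keeps the old CIFS_ASYNC_OP behaviour */
	return cifs_call_async(server, &iov, 1, my_callback, cbdata, true);
}
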
@@ -407,7 +409,7 @@ out_err:
407 * 409 *
408 */ 410 */
409int 411int
410SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, 412SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
411 struct smb_hdr *in_buf, int flags) 413 struct smb_hdr *in_buf, int flags)
412{ 414{
413 int rc; 415 int rc;
@@ -424,7 +426,7 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
424} 426}
425 427
426static int 428static int
427sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) 429cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
428{ 430{
429 int rc = 0; 431 int rc = 0;
430 432
@@ -432,28 +434,21 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
432 mid->mid, mid->midState); 434 mid->mid, mid->midState);
433 435
434 spin_lock(&GlobalMid_Lock); 436 spin_lock(&GlobalMid_Lock);
435 /* ensure that it's no longer on the pending_mid_q */
436 list_del_init(&mid->qhead);
437
438 switch (mid->midState) { 437 switch (mid->midState) {
439 case MID_RESPONSE_RECEIVED: 438 case MID_RESPONSE_RECEIVED:
440 spin_unlock(&GlobalMid_Lock); 439 spin_unlock(&GlobalMid_Lock);
441 return rc; 440 return rc;
442 case MID_REQUEST_SUBMITTED:
443 /* socket is going down, reject all calls */
444 if (server->tcpStatus == CifsExiting) {
445 cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d",
446 __func__, mid->mid, mid->command, mid->midState);
447 rc = -EHOSTDOWN;
448 break;
449 }
450 case MID_RETRY_NEEDED: 441 case MID_RETRY_NEEDED:
451 rc = -EAGAIN; 442 rc = -EAGAIN;
452 break; 443 break;
453 case MID_RESPONSE_MALFORMED: 444 case MID_RESPONSE_MALFORMED:
454 rc = -EIO; 445 rc = -EIO;
455 break; 446 break;
447 case MID_SHUTDOWN:
448 rc = -EHOSTDOWN;
449 break;
456 default: 450 default:
451 list_del_init(&mid->qhead);
457 cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, 452 cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
458 mid->mid, mid->midState); 453 mid->mid, mid->midState);
459 rc = -EIO; 454 rc = -EIO;
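
The renamed cifs_sync_mid_result() no longer unlinks the mid unconditionally and no longer special-cases MID_REQUEST_SUBMITTED during shutdown; a new MID_SHUTDOWN state carries the -EHOSTDOWN case instead, and only the invalid-state default path removes the entry from the queue. The resulting state-to-errno mapping, written out as an illustrative helper (not a function added by the patch):

static int mid_state_to_errno(const struct mid_q_entry *mid)
{
	switch (mid->midState) {
	case MID_RESPONSE_RECEIVED:
		return 0;
	case MID_RETRY_NEEDED:
		return -EAGAIN;
	case MID_RESPONSE_MALFORMED:
		return -EIO;
	case MID_SHUTDOWN:	/* new: the server connection is going away */
		return -EHOSTDOWN;
	default:		/* invalid state */
		return -EIO;
	}
}
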
@@ -502,13 +497,31 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
502} 497}
503 498
504int 499int
505SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, 500cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
501 bool log_error)
502{
503 dump_smb(mid->resp_buf,
504 min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));
505
506 /* convert the length into a more usable form */
507 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
508 /* FIXME: add code to kill session */
509 if (cifs_verify_signature(mid->resp_buf, server,
510 mid->sequence_number + 1) != 0)
511 cERROR(1, "Unexpected SMB signature");
512 }
513
514 /* BB special case reconnect tid and uid here? */
515 return map_smb_to_linux_error(mid->resp_buf, log_error);
516}
517
518int
519SendReceive2(const unsigned int xid, struct cifs_ses *ses,
506 struct kvec *iov, int n_vec, int *pRespBufType /* ret */, 520 struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
507 const int flags) 521 const int flags)
508{ 522{
509 int rc = 0; 523 int rc = 0;
510 int long_op; 524 int long_op;
511 unsigned int receive_len;
512 struct mid_q_entry *midQ; 525 struct mid_q_entry *midQ;
513 struct smb_hdr *in_buf = iov[0].iov_base; 526 struct smb_hdr *in_buf = iov[0].iov_base;
514 527
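
cifs_check_receive() gathers the response handling that SendReceive2(), SendReceive() and SendReceiveBlockingLock() each open-coded: dump the first bytes of the frame, verify the signature when the server signs, and map the SMB status to a Linux errno. A hedged sketch of the common call sequence the three callers now share; the wrapper is illustrative and cifs_sync_mid_result() is static to transport.c.

/* Illustrative condensation of the shared receive path; the real
 * callers additionally manage inFlight counts and buffer ownership. */
static int receive_and_check(struct TCP_Server_Info *server,
			     struct mid_q_entry *mid, bool log_error)
{
	int rc = cifs_sync_mid_result(mid, server);

	if (rc)
		return rc;
	if (!mid->resp_buf || mid->midState != MID_RESPONSE_RECEIVED)
		return -EIO;
	return cifs_check_receive(mid, server, log_error);
}
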
@@ -598,61 +611,31 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
598 611
599 cifs_small_buf_release(in_buf); 612 cifs_small_buf_release(in_buf);
600 613
601 rc = sync_mid_result(midQ, ses->server); 614 rc = cifs_sync_mid_result(midQ, ses->server);
602 if (rc != 0) { 615 if (rc != 0) {
603 atomic_dec(&ses->server->inFlight); 616 atomic_dec(&ses->server->inFlight);
604 wake_up(&ses->server->request_q); 617 wake_up(&ses->server->request_q);
605 return rc; 618 return rc;
606 } 619 }
607 620
608 receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length); 621 if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
609
610 if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
611 cERROR(1, "Frame too large received. Length: %d Xid: %d",
612 receive_len, xid);
613 rc = -EIO; 622 rc = -EIO;
623 cFYI(1, "Bad MID state?");
614 goto out; 624 goto out;
615 } 625 }
616 626
617 /* rcvd frame is ok */ 627 iov[0].iov_base = (char *)midQ->resp_buf;
618 628 iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
619 if (midQ->resp_buf && 629 if (midQ->largeBuf)
620 (midQ->midState == MID_RESPONSE_RECEIVED)) { 630 *pRespBufType = CIFS_LARGE_BUFFER;
621 631 else
622 iov[0].iov_base = (char *)midQ->resp_buf; 632 *pRespBufType = CIFS_SMALL_BUFFER;
623 if (midQ->largeBuf)
624 *pRespBufType = CIFS_LARGE_BUFFER;
625 else
626 *pRespBufType = CIFS_SMALL_BUFFER;
627 iov[0].iov_len = receive_len + 4;
628
629 dump_smb(midQ->resp_buf, 80);
630 /* convert the length into a more usable form */
631 if ((receive_len > 24) &&
632 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
633 SECMODE_SIGN_ENABLED))) {
634 rc = cifs_verify_signature(midQ->resp_buf,
635 ses->server,
636 midQ->sequence_number+1);
637 if (rc) {
638 cERROR(1, "Unexpected SMB signature");
639 /* BB FIXME add code to kill session */
640 }
641 }
642
643 /* BB special case reconnect tid and uid here? */
644 rc = map_smb_to_linux_error(midQ->resp_buf,
645 flags & CIFS_LOG_ERROR);
646 633
647 if ((flags & CIFS_NO_RESP) == 0) 634 rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);
648 midQ->resp_buf = NULL; /* mark it so buf will
649 not be freed by
650 delete_mid */
651 } else {
652 rc = -EIO;
653 cFYI(1, "Bad MID state?");
654 }
655 635
636 /* mark it so buf will not be freed by delete_mid */
637 if ((flags & CIFS_NO_RESP) == 0)
638 midQ->resp_buf = NULL;
656out: 639out:
657 delete_mid(midQ); 640 delete_mid(midQ);
658 atomic_dec(&ses->server->inFlight); 641 atomic_dec(&ses->server->inFlight);
@@ -662,12 +645,11 @@ out:
662} 645}
663 646
664int 647int
665SendReceive(const unsigned int xid, struct cifsSesInfo *ses, 648SendReceive(const unsigned int xid, struct cifs_ses *ses,
666 struct smb_hdr *in_buf, struct smb_hdr *out_buf, 649 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
667 int *pbytes_returned, const int long_op) 650 int *pbytes_returned, const int long_op)
668{ 651{
669 int rc = 0; 652 int rc = 0;
670 unsigned int receive_len;
671 struct mid_q_entry *midQ; 653 struct mid_q_entry *midQ;
672 654
673 if (ses == NULL) { 655 if (ses == NULL) {
@@ -750,54 +732,23 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
750 spin_unlock(&GlobalMid_Lock); 732 spin_unlock(&GlobalMid_Lock);
751 } 733 }
752 734
753 rc = sync_mid_result(midQ, ses->server); 735 rc = cifs_sync_mid_result(midQ, ses->server);
754 if (rc != 0) { 736 if (rc != 0) {
755 atomic_dec(&ses->server->inFlight); 737 atomic_dec(&ses->server->inFlight);
756 wake_up(&ses->server->request_q); 738 wake_up(&ses->server->request_q);
757 return rc; 739 return rc;
758 } 740 }
759 741
760 receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length); 742 if (!midQ->resp_buf || !out_buf ||
761 743 midQ->midState != MID_RESPONSE_RECEIVED) {
762 if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
763 cERROR(1, "Frame too large received. Length: %d Xid: %d",
764 receive_len, xid);
765 rc = -EIO;
766 goto out;
767 }
768
769 /* rcvd frame is ok */
770
771 if (midQ->resp_buf && out_buf
772 && (midQ->midState == MID_RESPONSE_RECEIVED)) {
773 out_buf->smb_buf_length = cpu_to_be32(receive_len);
774 memcpy((char *)out_buf + 4,
775 (char *)midQ->resp_buf + 4,
776 receive_len);
777
778 dump_smb(out_buf, 92);
779 /* convert the length into a more usable form */
780 if ((receive_len > 24) &&
781 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
782 SECMODE_SIGN_ENABLED))) {
783 rc = cifs_verify_signature(out_buf,
784 ses->server,
785 midQ->sequence_number+1);
786 if (rc) {
787 cERROR(1, "Unexpected SMB signature");
788 /* BB FIXME add code to kill session */
789 }
790 }
791
792 *pbytes_returned = be32_to_cpu(out_buf->smb_buf_length);
793
794 /* BB special case reconnect tid and uid here? */
795 rc = map_smb_to_linux_error(out_buf, 0 /* no log */ );
796 } else {
797 rc = -EIO; 744 rc = -EIO;
798 cERROR(1, "Bad MID state?"); 745 cERROR(1, "Bad MID state?");
746 goto out;
799 } 747 }
800 748
749 *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
750 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
751 rc = cifs_check_receive(midQ, ses->server, 0);
801out: 752out:
802 delete_mid(midQ); 753 delete_mid(midQ);
803 atomic_dec(&ses->server->inFlight); 754 atomic_dec(&ses->server->inFlight);
@@ -810,12 +761,12 @@ out:
810 blocking lock to return. */ 761 blocking lock to return. */
811 762
812static int 763static int
813send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon, 764send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
814 struct smb_hdr *in_buf, 765 struct smb_hdr *in_buf,
815 struct smb_hdr *out_buf) 766 struct smb_hdr *out_buf)
816{ 767{
817 int bytes_returned; 768 int bytes_returned;
818 struct cifsSesInfo *ses = tcon->ses; 769 struct cifs_ses *ses = tcon->ses;
819 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf; 770 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
820 771
821 /* We just modify the current in_buf to change 772 /* We just modify the current in_buf to change
@@ -832,15 +783,14 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
832} 783}
833 784
834int 785int
835SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, 786SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
836 struct smb_hdr *in_buf, struct smb_hdr *out_buf, 787 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
837 int *pbytes_returned) 788 int *pbytes_returned)
838{ 789{
839 int rc = 0; 790 int rc = 0;
840 int rstart = 0; 791 int rstart = 0;
841 unsigned int receive_len;
842 struct mid_q_entry *midQ; 792 struct mid_q_entry *midQ;
843 struct cifsSesInfo *ses; 793 struct cifs_ses *ses;
844 794
845 if (tcon == NULL || tcon->ses == NULL) { 795 if (tcon == NULL || tcon->ses == NULL) {
846 cERROR(1, "Null smb session"); 796 cERROR(1, "Null smb session");
@@ -957,50 +907,20 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
957 rstart = 1; 907 rstart = 1;
958 } 908 }
959 909
960 rc = sync_mid_result(midQ, ses->server); 910 rc = cifs_sync_mid_result(midQ, ses->server);
961 if (rc != 0) 911 if (rc != 0)
962 return rc; 912 return rc;
963 913
964 receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length);
965 if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
966 cERROR(1, "Frame too large received. Length: %d Xid: %d",
967 receive_len, xid);
968 rc = -EIO;
969 goto out;
970 }
971
972 /* rcvd frame is ok */ 914 /* rcvd frame is ok */
973 915 if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
974 if ((out_buf == NULL) || (midQ->midState != MID_RESPONSE_RECEIVED)) {
975 rc = -EIO; 916 rc = -EIO;
976 cERROR(1, "Bad MID state?"); 917 cERROR(1, "Bad MID state?");
977 goto out; 918 goto out;
978 } 919 }
979 920
980 out_buf->smb_buf_length = cpu_to_be32(receive_len); 921 *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
981 memcpy((char *)out_buf + 4, 922 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
982 (char *)midQ->resp_buf + 4, 923 rc = cifs_check_receive(midQ, ses->server, 0);
983 receive_len);
984
985 dump_smb(out_buf, 92);
986 /* convert the length into a more usable form */
987 if ((receive_len > 24) &&
988 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
989 SECMODE_SIGN_ENABLED))) {
990 rc = cifs_verify_signature(out_buf,
991 ses->server,
992 midQ->sequence_number+1);
993 if (rc) {
994 cERROR(1, "Unexpected SMB signature");
995 /* BB FIXME add code to kill session */
996 }
997 }
998
999 *pbytes_returned = be32_to_cpu(out_buf->smb_buf_length);
1000
1001 /* BB special case reconnect tid and uid here? */
1002 rc = map_smb_to_linux_error(out_buf, 0 /* no log */ );
1003
1004out: 924out:
1005 delete_mid(midQ); 925 delete_mid(midQ);
1006 if (rstart && rc == -EACCES) 926 if (rstart && rc == -EACCES)
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 912995e013ec..2a22fb2989e4 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -49,7 +49,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
49 int xid; 49 int xid;
50 struct cifs_sb_info *cifs_sb; 50 struct cifs_sb_info *cifs_sb;
51 struct tcon_link *tlink; 51 struct tcon_link *tlink;
52 struct cifsTconInfo *pTcon; 52 struct cifs_tcon *pTcon;
53 struct super_block *sb; 53 struct super_block *sb;
54 char *full_path = NULL; 54 char *full_path = NULL;
55 55
@@ -109,7 +109,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
109 int xid; 109 int xid;
110 struct cifs_sb_info *cifs_sb; 110 struct cifs_sb_info *cifs_sb;
111 struct tcon_link *tlink; 111 struct tcon_link *tlink;
112 struct cifsTconInfo *pTcon; 112 struct cifs_tcon *pTcon;
113 struct super_block *sb; 113 struct super_block *sb;
114 char *full_path; 114 char *full_path;
115 struct cifs_ntsd *pacl; 115 struct cifs_ntsd *pacl;
@@ -240,7 +240,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
240 int xid; 240 int xid;
241 struct cifs_sb_info *cifs_sb; 241 struct cifs_sb_info *cifs_sb;
242 struct tcon_link *tlink; 242 struct tcon_link *tlink;
243 struct cifsTconInfo *pTcon; 243 struct cifs_tcon *pTcon;
244 struct super_block *sb; 244 struct super_block *sb;
245 char *full_path; 245 char *full_path;
246 246
@@ -372,7 +372,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
372 int xid; 372 int xid;
373 struct cifs_sb_info *cifs_sb; 373 struct cifs_sb_info *cifs_sb;
374 struct tcon_link *tlink; 374 struct tcon_link *tlink;
375 struct cifsTconInfo *pTcon; 375 struct cifs_tcon *pTcon;
376 struct super_block *sb; 376 struct super_block *sb;
377 char *full_path; 377 char *full_path;
378 378
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index b80e0aa3cfa5..5a59efa0bb46 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -50,7 +50,7 @@ static int __init init_dlm(void)
50 if (error) 50 if (error)
51 goto out_netlink; 51 goto out_netlink;
52 52
53 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__); 53 printk("DLM installed\n");
54 54
55 return 0; 55 return 0;
56 56
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 227b409b8406..bc116b9ffcf2 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -529,6 +529,8 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
529 dget(lower_dentry); 529 dget(lower_dentry);
530 rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry); 530 rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
531 dput(lower_dentry); 531 dput(lower_dentry);
532 if (!rc && dentry->d_inode)
533 clear_nlink(dentry->d_inode);
532 fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); 534 fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
533 dir->i_nlink = lower_dir_dentry->d_inode->i_nlink; 535 dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
534 unlock_dir(lower_dir_dentry); 536 unlock_dir(lower_dir_dentry);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 03e609c45012..27a7fefb83eb 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -599,8 +599,8 @@ struct ecryptfs_write_tag_70_packet_silly_stack {
599 struct mutex *tfm_mutex; 599 struct mutex *tfm_mutex;
600 char *block_aligned_filename; 600 char *block_aligned_filename;
601 struct ecryptfs_auth_tok *auth_tok; 601 struct ecryptfs_auth_tok *auth_tok;
602 struct scatterlist src_sg; 602 struct scatterlist src_sg[2];
603 struct scatterlist dst_sg; 603 struct scatterlist dst_sg[2];
604 struct blkcipher_desc desc; 604 struct blkcipher_desc desc;
605 char iv[ECRYPTFS_MAX_IV_BYTES]; 605 char iv[ECRYPTFS_MAX_IV_BYTES];
606 char hash[ECRYPTFS_TAG_70_DIGEST_SIZE]; 606 char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
@@ -816,23 +816,21 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
816 memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename, 816 memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename,
817 filename_size); 817 filename_size);
818 rc = virt_to_scatterlist(s->block_aligned_filename, 818 rc = virt_to_scatterlist(s->block_aligned_filename,
819 s->block_aligned_filename_size, &s->src_sg, 1); 819 s->block_aligned_filename_size, s->src_sg, 2);
820 if (rc != 1) { 820 if (rc < 1) {
821 printk(KERN_ERR "%s: Internal error whilst attempting to " 821 printk(KERN_ERR "%s: Internal error whilst attempting to "
822 "convert filename memory to scatterlist; " 822 "convert filename memory to scatterlist; rc = [%d]. "
823 "expected rc = 1; got rc = [%d]. "
824 "block_aligned_filename_size = [%zd]\n", __func__, rc, 823 "block_aligned_filename_size = [%zd]\n", __func__, rc,
825 s->block_aligned_filename_size); 824 s->block_aligned_filename_size);
826 goto out_release_free_unlock; 825 goto out_release_free_unlock;
827 } 826 }
828 rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size, 827 rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size,
829 &s->dst_sg, 1); 828 s->dst_sg, 2);
830 if (rc != 1) { 829 if (rc < 1) {
831 printk(KERN_ERR "%s: Internal error whilst attempting to " 830 printk(KERN_ERR "%s: Internal error whilst attempting to "
832 "convert encrypted filename memory to scatterlist; " 831 "convert encrypted filename memory to scatterlist; "
833 "expected rc = 1; got rc = [%d]. " 832 "rc = [%d]. block_aligned_filename_size = [%zd]\n",
834 "block_aligned_filename_size = [%zd]\n", __func__, rc, 833 __func__, rc, s->block_aligned_filename_size);
835 s->block_aligned_filename_size);
836 goto out_release_free_unlock; 834 goto out_release_free_unlock;
837 } 835 }
838 /* The characters in the first block effectively do the job 836 /* The characters in the first block effectively do the job
@@ -855,7 +853,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
855 mount_crypt_stat->global_default_fn_cipher_key_bytes); 853 mount_crypt_stat->global_default_fn_cipher_key_bytes);
856 goto out_release_free_unlock; 854 goto out_release_free_unlock;
857 } 855 }
858 rc = crypto_blkcipher_encrypt_iv(&s->desc, &s->dst_sg, &s->src_sg, 856 rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg,
859 s->block_aligned_filename_size); 857 s->block_aligned_filename_size);
860 if (rc) { 858 if (rc) {
861 printk(KERN_ERR "%s: Error attempting to encrypt filename; " 859 printk(KERN_ERR "%s: Error attempting to encrypt filename; "
@@ -891,8 +889,8 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
891 struct mutex *tfm_mutex; 889 struct mutex *tfm_mutex;
892 char *decrypted_filename; 890 char *decrypted_filename;
893 struct ecryptfs_auth_tok *auth_tok; 891 struct ecryptfs_auth_tok *auth_tok;
894 struct scatterlist src_sg; 892 struct scatterlist src_sg[2];
895 struct scatterlist dst_sg; 893 struct scatterlist dst_sg[2];
896 struct blkcipher_desc desc; 894 struct blkcipher_desc desc;
897 char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1]; 895 char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
898 char iv[ECRYPTFS_MAX_IV_BYTES]; 896 char iv[ECRYPTFS_MAX_IV_BYTES];
@@ -1008,13 +1006,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
1008 } 1006 }
1009 mutex_lock(s->tfm_mutex); 1007 mutex_lock(s->tfm_mutex);
1010 rc = virt_to_scatterlist(&data[(*packet_size)], 1008 rc = virt_to_scatterlist(&data[(*packet_size)],
1011 s->block_aligned_filename_size, &s->src_sg, 1); 1009 s->block_aligned_filename_size, s->src_sg, 2);
1012 if (rc != 1) { 1010 if (rc < 1) {
1013 printk(KERN_ERR "%s: Internal error whilst attempting to " 1011 printk(KERN_ERR "%s: Internal error whilst attempting to "
1014 "convert encrypted filename memory to scatterlist; " 1012 "convert encrypted filename memory to scatterlist; "
1015 "expected rc = 1; got rc = [%d]. " 1013 "rc = [%d]. block_aligned_filename_size = [%zd]\n",
1016 "block_aligned_filename_size = [%zd]\n", __func__, rc, 1014 __func__, rc, s->block_aligned_filename_size);
1017 s->block_aligned_filename_size);
1018 goto out_unlock; 1015 goto out_unlock;
1019 } 1016 }
1020 (*packet_size) += s->block_aligned_filename_size; 1017 (*packet_size) += s->block_aligned_filename_size;
@@ -1028,13 +1025,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
1028 goto out_unlock; 1025 goto out_unlock;
1029 } 1026 }
1030 rc = virt_to_scatterlist(s->decrypted_filename, 1027 rc = virt_to_scatterlist(s->decrypted_filename,
1031 s->block_aligned_filename_size, &s->dst_sg, 1); 1028 s->block_aligned_filename_size, s->dst_sg, 2);
1032 if (rc != 1) { 1029 if (rc < 1) {
1033 printk(KERN_ERR "%s: Internal error whilst attempting to " 1030 printk(KERN_ERR "%s: Internal error whilst attempting to "
1034 "convert decrypted filename memory to scatterlist; " 1031 "convert decrypted filename memory to scatterlist; "
1035 "expected rc = 1; got rc = [%d]. " 1032 "rc = [%d]. block_aligned_filename_size = [%zd]\n",
1036 "block_aligned_filename_size = [%zd]\n", __func__, rc, 1033 __func__, rc, s->block_aligned_filename_size);
1037 s->block_aligned_filename_size);
1038 goto out_free_unlock; 1034 goto out_free_unlock;
1039 } 1035 }
1040 /* The characters in the first block effectively do the job of 1036 /* The characters in the first block effectively do the job of
@@ -1065,7 +1061,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
1065 mount_crypt_stat->global_default_fn_cipher_key_bytes); 1061 mount_crypt_stat->global_default_fn_cipher_key_bytes);
1066 goto out_free_unlock; 1062 goto out_free_unlock;
1067 } 1063 }
1068 rc = crypto_blkcipher_decrypt_iv(&s->desc, &s->dst_sg, &s->src_sg, 1064 rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg,
1069 s->block_aligned_filename_size); 1065 s->block_aligned_filename_size);
1070 if (rc) { 1066 if (rc) {
1071 printk(KERN_ERR "%s: Error attempting to decrypt filename; " 1067 printk(KERN_ERR "%s: Error attempting to decrypt filename; "
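
The keystore changes account for the block-aligned filename buffers possibly straddling a page boundary: src_sg and dst_sg become two-entry arrays, virt_to_scatterlist() is given room for both entries, and any positive return (1 or 2 entries) is accepted. A hedged sketch of the pattern; the helper name is illustrative and the virt_to_scatterlist() prototype from ecryptfs_kernel.h is assumed.

/* Illustrative helper: map a kmalloc'ed buffer that may cross one page
 * boundary into up to two scatterlist entries. */
static int map_filename_buf(char *buf, int len, struct scatterlist sg[2])
{
	int rc = virt_to_scatterlist(buf, len, sg, 2);

	if (rc < 1) {
		printk(KERN_ERR "%s: could not map %d byte buffer to a "
		       "scatterlist; rc = [%d]\n", __func__, len, rc);
		return rc < 0 ? rc : -EINVAL;
	}
	/* rc entries (1 or 2) of sg[] are now valid */
	return 0;
}
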
diff --git a/fs/exec.c b/fs/exec.c
index 936f5776655c..ea5f748906a8 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -42,7 +42,6 @@
42#include <linux/pid_namespace.h> 42#include <linux/pid_namespace.h>
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/namei.h> 44#include <linux/namei.h>
45#include <linux/proc_fs.h>
46#include <linux/mount.h> 45#include <linux/mount.h>
47#include <linux/security.h> 46#include <linux/security.h>
48#include <linux/syscalls.h> 47#include <linux/syscalls.h>
@@ -1624,6 +1623,41 @@ expand_fail:
1624 return ret; 1623 return ret;
1625} 1624}
1626 1625
1626static int cn_print_exe_file(struct core_name *cn)
1627{
1628 struct file *exe_file;
1629 char *pathbuf, *path, *p;
1630 int ret;
1631
1632 exe_file = get_mm_exe_file(current->mm);
1633 if (!exe_file)
1634 return cn_printf(cn, "(unknown)");
1635
1636 pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
1637 if (!pathbuf) {
1638 ret = -ENOMEM;
1639 goto put_exe_file;
1640 }
1641
1642 path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
1643 if (IS_ERR(path)) {
1644 ret = PTR_ERR(path);
1645 goto free_buf;
1646 }
1647
1648 for (p = path; *p; p++)
1649 if (*p == '/')
1650 *p = '!';
1651
1652 ret = cn_printf(cn, "%s", path);
1653
1654free_buf:
1655 kfree(pathbuf);
1656put_exe_file:
1657 fput(exe_file);
1658 return ret;
1659}
1660
1627/* format_corename will inspect the pattern parameter, and output a 1661/* format_corename will inspect the pattern parameter, and output a
1628 * name into corename, which must have space for at least 1662 * name into corename, which must have space for at least
1629 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. 1663 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
@@ -1695,6 +1729,9 @@ static int format_corename(struct core_name *cn, long signr)
1695 case 'e': 1729 case 'e':
1696 err = cn_printf(cn, "%s", current->comm); 1730 err = cn_printf(cn, "%s", current->comm);
1697 break; 1731 break;
1732 case 'E':
1733 err = cn_print_exe_file(cn);
1734 break;
1698 /* core limit size */ 1735 /* core limit size */
1699 case 'c': 1736 case 'c':
1700 err = cn_printf(cn, "%lu", 1737 err = cn_printf(cn, "%lu",
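
The exec.c hunks add a %E specifier to core_pattern: cn_print_exe_file() prints the path of the mm's executable with every '/' replaced by '!', or "(unknown)" when no exe file is attached. A hedged userspace example that selects the new specifier follows; the pattern string is only an illustration and writing core_pattern requires root.

/* Illustrative userspace snippet: name core dumps after the mangled
 * executable path plus the pid, e.g. core.!usr!bin!sleep.1234 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/core_pattern", "w");

	if (!f) {
		perror("core_pattern");
		return 1;
	}
	fprintf(f, "core.%%E.%%p\n");
	return fclose(f) ? 1 : 0;
}
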
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index cfa327d33194..c2b34cd2abe0 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -146,7 +146,7 @@ static int __init init_gfs2_fs(void)
146 146
147 gfs2_register_debugfs(); 147 gfs2_register_debugfs();
148 148
149 printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__); 149 printk("GFS2 installed\n");
150 150
151 return 0; 151 return 0;
152 152
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 05f73328b28b..9a1e86fc1362 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -75,7 +75,6 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
75 struct nameidata *nd) 75 struct nameidata *nd)
76{ 76{
77 struct jffs2_inode_info *dir_f; 77 struct jffs2_inode_info *dir_f;
78 struct jffs2_sb_info *c;
79 struct jffs2_full_dirent *fd = NULL, *fd_list; 78 struct jffs2_full_dirent *fd = NULL, *fd_list;
80 uint32_t ino = 0; 79 uint32_t ino = 0;
81 struct inode *inode = NULL; 80 struct inode *inode = NULL;
@@ -86,7 +85,6 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
86 return ERR_PTR(-ENAMETOOLONG); 85 return ERR_PTR(-ENAMETOOLONG);
87 86
88 dir_f = JFFS2_INODE_INFO(dir_i); 87 dir_f = JFFS2_INODE_INFO(dir_i);
89 c = JFFS2_SB_INFO(dir_i->i_sb);
90 88
91 mutex_lock(&dir_f->sem); 89 mutex_lock(&dir_f->sem);
92 90
@@ -119,7 +117,6 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
119static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) 117static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
120{ 118{
121 struct jffs2_inode_info *f; 119 struct jffs2_inode_info *f;
122 struct jffs2_sb_info *c;
123 struct inode *inode = filp->f_path.dentry->d_inode; 120 struct inode *inode = filp->f_path.dentry->d_inode;
124 struct jffs2_full_dirent *fd; 121 struct jffs2_full_dirent *fd;
125 unsigned long offset, curofs; 122 unsigned long offset, curofs;
@@ -127,7 +124,6 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
127 D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_path.dentry->d_inode->i_ino)); 124 D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_path.dentry->d_inode->i_ino));
128 125
129 f = JFFS2_INODE_INFO(inode); 126 f = JFFS2_INODE_INFO(inode);
130 c = JFFS2_SB_INFO(inode->i_sb);
131 127
132 offset = filp->f_pos; 128 offset = filp->f_pos;
133 129
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index b632dddcb482..8d8cd3419d02 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -94,7 +94,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
94 uint32_t buf_size = 0; 94 uint32_t buf_size = 0;
95 struct jffs2_summary *s = NULL; /* summary info collected by the scan process */ 95 struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
96#ifndef __ECOS 96#ifndef __ECOS
97 size_t pointlen; 97 size_t pointlen, try_size;
98 98
99 if (c->mtd->point) { 99 if (c->mtd->point) {
100 ret = c->mtd->point(c->mtd, 0, c->mtd->size, &pointlen, 100 ret = c->mtd->point(c->mtd, 0, c->mtd->size, &pointlen,
@@ -113,18 +113,21 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
113 /* For NAND it's quicker to read a whole eraseblock at a time, 113 /* For NAND it's quicker to read a whole eraseblock at a time,
114 apparently */ 114 apparently */
115 if (jffs2_cleanmarker_oob(c)) 115 if (jffs2_cleanmarker_oob(c))
116 buf_size = c->sector_size; 116 try_size = c->sector_size;
117 else 117 else
118 buf_size = PAGE_SIZE; 118 try_size = PAGE_SIZE;
119 119
120 /* Respect kmalloc limitations */ 120 D1(printk(KERN_DEBUG "Trying to allocate readbuf of %zu "
121 if (buf_size > 128*1024) 121 "bytes\n", try_size));
122 buf_size = 128*1024;
123 122
124 D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size)); 123 flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
125 flashbuf = kmalloc(buf_size, GFP_KERNEL);
126 if (!flashbuf) 124 if (!flashbuf)
127 return -ENOMEM; 125 return -ENOMEM;
126
127 D1(printk(KERN_DEBUG "Allocated readbuf of %zu bytes\n",
128 try_size));
129
130 buf_size = (uint32_t)try_size;
128 } 131 }
129 132
130 if (jffs2_sum_active()) { 133 if (jffs2_sum_active()) {
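
jffs2_scan_medium() now asks mtd_kmalloc_up_to() for a whole eraseblock (or a page) and records the size it actually got, instead of capping the request at a hard-coded 128 KB and failing if that single kmalloc() cannot be satisfied. The idea behind the helper is a size back-off; an illustrative analogue is sketched below. This is not the mtdcore implementation, which also consults the MTD write size and tunes the allocation flags.

/* Illustrative back-off allocator: shrink the request instead of
 * failing when the preferred size is unavailable. */
#include <linux/slab.h>

static void *kmalloc_up_to(size_t *size, size_t min_size)
{
	void *buf;

	while (*size >= min_size) {
		buf = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN);
		if (buf)
			return buf;
		*size >>= 1;	/* try a smaller buffer */
	}
	return NULL;
}
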
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index a7c07b44b100..e5d71b27a5b0 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -16,6 +16,7 @@
16#include <linux/mman.h> 16#include <linux/mman.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/fcntl.h> 18#include <linux/fcntl.h>
19#include <linux/memcontrol.h>
19 20
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21#include <asm/system.h> 22#include <asm/system.h>
@@ -92,6 +93,7 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area,
92 * -- wli 93 * -- wli
93 */ 94 */
94 count_vm_event(PGMAJFAULT); 95 count_vm_event(PGMAJFAULT);
96 mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT);
95 return VM_FAULT_MAJOR; 97 return VM_FAULT_MAJOR;
96} 98}
97 99
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 4c5488468c14..cd9427023d2e 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -368,7 +368,7 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode,
368 int *vict_bit, 368 int *vict_bit,
369 struct buffer_head **ret_bh) 369 struct buffer_head **ret_bh)
370{ 370{
371 int ret, i, blocks_per_unit = 1; 371 int ret, i, bits_per_unit = 0;
372 u64 blkno; 372 u64 blkno;
373 char namebuf[40]; 373 char namebuf[40];
374 374
@@ -398,14 +398,14 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode,
398 rec = &(cl->cl_recs[0]); 398 rec = &(cl->cl_recs[0]);
399 399
400 if (type == GLOBAL_BITMAP_SYSTEM_INODE) 400 if (type == GLOBAL_BITMAP_SYSTEM_INODE)
401 blocks_per_unit <<= (osb->s_clustersize_bits - 401 bits_per_unit = osb->s_clustersize_bits -
402 inode->i_sb->s_blocksize_bits); 402 inode->i_sb->s_blocksize_bits;
403 /* 403 /*
404 * 'vict_blkno' was out of the valid range. 404 * 'vict_blkno' was out of the valid range.
405 */ 405 */
406 if ((vict_blkno < le64_to_cpu(rec->c_blkno)) || 406 if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
407 (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) * 407 (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
408 blocks_per_unit))) { 408 bits_per_unit))) {
409 ret = -EINVAL; 409 ret = -EINVAL;
410 goto out; 410 goto out;
411 } 411 }
@@ -441,8 +441,8 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode,
441 le16_to_cpu(bg->bg_bits))) { 441 le16_to_cpu(bg->bg_bits))) {
442 442
443 *ret_bh = gd_bh; 443 *ret_bh = gd_bh;
444 *vict_bit = (vict_blkno - blkno) / 444 *vict_bit = (vict_blkno - blkno) >>
445 blocks_per_unit; 445 bits_per_unit;
446 mlog(0, "find the victim group: #%llu, " 446 mlog(0, "find the victim group: #%llu, "
447 "total_bits: %u, vict_bit: %u\n", 447 "total_bits: %u, vict_bit: %u\n",
448 blkno, le16_to_cpu(bg->bg_bits), 448 blkno, le16_to_cpu(bg->bg_bits),
@@ -472,12 +472,24 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
472 int ret, goal_bit = 0; 472 int ret, goal_bit = 0;
473 473
474 struct buffer_head *gd_bh = NULL; 474 struct buffer_head *gd_bh = NULL;
475 struct ocfs2_group_desc *bg; 475 struct ocfs2_group_desc *bg = NULL;
476 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 476 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
477 int c_to_b = 1 << (osb->s_clustersize_bits - 477 int c_to_b = 1 << (osb->s_clustersize_bits -
478 inode->i_sb->s_blocksize_bits); 478 inode->i_sb->s_blocksize_bits);
479 479
480 /* 480 /*
481 * make goal become cluster aligned.
482 */
483 range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
484 range->me_goal);
485 /*
486 * moving goal is not allowed to start with a group desc block (#0 blk)
487 * let's compromise to the latter cluster.
488 */
489 if (range->me_goal == le64_to_cpu(bg->bg_blkno))
490 range->me_goal += c_to_b;
491
492 /*
481 * validate goal sits within global_bitmap, and return the victim 493 * validate goal sits within global_bitmap, and return the victim
482 * group desc 494 * group desc
483 */ 495 */
@@ -491,19 +503,6 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
491 bg = (struct ocfs2_group_desc *)gd_bh->b_data; 503 bg = (struct ocfs2_group_desc *)gd_bh->b_data;
492 504
493 /* 505 /*
494 * make goal become cluster aligned.
495 */
496 if (range->me_goal % c_to_b)
497 range->me_goal = range->me_goal / c_to_b * c_to_b;
498
499 /*
500 * moving goal is not allowd to start with a group desc blok(#0 blk)
501 * let's compromise to the latter cluster.
502 */
503 if (range->me_goal == le64_to_cpu(bg->bg_blkno))
504 range->me_goal += c_to_b;
505
506 /*
507 * movement is not gonna cross two groups. 506 * movement is not gonna cross two groups.
508 */ 507 */
509 if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize < 508 if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
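
The move_extents hunks replace the blocks-per-cluster multiplier with a shift count (bits_per_unit = s_clustersize_bits - s_blocksize_bits), so the range check and the victim-bit computation become shifts, and the goal alignment is reworked to use ocfs2_block_to_cluster_start() and moved ahead of the group lookup. One thing worth noting in this hunk: the relocated comparison against bg->bg_blkno now runs before bg is read from gd_bh, so bg still holds the NULL it was initialised with at that point. The unit conversion itself, as a pair of illustrative helpers (not ocfs2 API):

/* With cluster and block sizes both powers of two, block <-> cluster
 * conversions are shifts by the difference of the two bit counts. */
#include <linux/types.h>

static inline u64 blocks_to_clusters(u64 blocks, int clustersize_bits,
				     int blocksize_bits)
{
	return blocks >> (clustersize_bits - blocksize_bits);
}

static inline u64 clusters_to_blocks(u64 clusters, int clustersize_bits,
				     int blocksize_bits)
{
	return clusters << (clustersize_bits - blocksize_bits);
}
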
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 8ed4d3433199..f82e762eeca2 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -256,10 +256,12 @@ ssize_t part_discard_alignment_show(struct device *dev,
256{ 256{
257 struct hd_struct *p = dev_to_part(dev); 257 struct hd_struct *p = dev_to_part(dev);
258 struct gendisk *disk = dev_to_disk(dev); 258 struct gendisk *disk = dev_to_disk(dev);
259 unsigned int alignment = 0;
259 260
260 return sprintf(buf, "%u\n", 261 if (disk->queue)
261 queue_limit_discard_alignment(&disk->queue->limits, 262 alignment = queue_limit_discard_alignment(&disk->queue->limits,
262 p->start_sect)); 263 p->start_sect);
264 return sprintf(buf, "%u\n", alignment);
263} 265}
264 266
265ssize_t part_stat_show(struct device *dev, 267ssize_t part_stat_show(struct device *dev,
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index 19d6750d1d6c..6296b403c67a 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -310,6 +310,15 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
310 goto fail; 310 goto fail;
311 } 311 }
312 312
313 /* Check the GUID Partition Table header size */
314 if (le32_to_cpu((*gpt)->header_size) >
315 bdev_logical_block_size(state->bdev)) {
316 pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
317 le32_to_cpu((*gpt)->header_size),
318 bdev_logical_block_size(state->bdev));
319 goto fail;
320 }
321
313 /* Check the GUID Partition Table CRC */ 322 /* Check the GUID Partition Table CRC */
314 origcrc = le32_to_cpu((*gpt)->header_crc32); 323 origcrc = le32_to_cpu((*gpt)->header_crc32);
315 (*gpt)->header_crc32 = 0; 324 (*gpt)->header_crc32 = 0;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 5e4f776b0917..9b45ee84fbcc 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -131,7 +131,7 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
131 * you can test for combinations of others with 131 * you can test for combinations of others with
132 * simple bit tests. 132 * simple bit tests.
133 */ 133 */
134static const char *task_state_array[] = { 134static const char * const task_state_array[] = {
135 "R (running)", /* 0 */ 135 "R (running)", /* 0 */
136 "S (sleeping)", /* 1 */ 136 "S (sleeping)", /* 1 */
137 "D (disk sleep)", /* 2 */ 137 "D (disk sleep)", /* 2 */
@@ -147,7 +147,7 @@ static const char *task_state_array[] = {
147static inline const char *get_task_state(struct task_struct *tsk) 147static inline const char *get_task_state(struct task_struct *tsk)
148{ 148{
149 unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state; 149 unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
150 const char **p = &task_state_array[0]; 150 const char * const *p = &task_state_array[0];
151 151
152 BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array)); 152 BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
153 153
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dc8bca72b002..4ede550517a6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -894,20 +894,20 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
894 if (!task) 894 if (!task)
895 goto out_no_task; 895 goto out_no_task;
896 896
897 copied = -ENOMEM;
898 page = (char *)__get_free_page(GFP_TEMPORARY);
899 if (!page)
900 goto out_task;
901
897 mm = check_mem_permission(task); 902 mm = check_mem_permission(task);
898 copied = PTR_ERR(mm); 903 copied = PTR_ERR(mm);
899 if (IS_ERR(mm)) 904 if (IS_ERR(mm))
900 goto out_task; 905 goto out_free;
901 906
902 copied = -EIO; 907 copied = -EIO;
903 if (file->private_data != (void *)((long)current->self_exec_id)) 908 if (file->private_data != (void *)((long)current->self_exec_id))
904 goto out_mm; 909 goto out_mm;
905 910
906 copied = -ENOMEM;
907 page = (char *)__get_free_page(GFP_TEMPORARY);
908 if (!page)
909 goto out_mm;
910
911 copied = 0; 911 copied = 0;
912 while (count > 0) { 912 while (count > 0) {
913 int this_len, retval; 913 int this_len, retval;
@@ -929,9 +929,11 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
929 count -= retval; 929 count -= retval;
930 } 930 }
931 *ppos = dst; 931 *ppos = dst;
932 free_page((unsigned long) page); 932
933out_mm: 933out_mm:
934 mmput(mm); 934 mmput(mm);
935out_free:
936 free_page((unsigned long) page);
935out_task: 937out_task:
936 put_task_struct(task); 938 put_task_struct(task);
937out_no_task: 939out_no_task:
@@ -1059,7 +1061,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
1059{ 1061{
1060 struct task_struct *task; 1062 struct task_struct *task;
1061 char buffer[PROC_NUMBUF]; 1063 char buffer[PROC_NUMBUF];
1062 long oom_adjust; 1064 int oom_adjust;
1063 unsigned long flags; 1065 unsigned long flags;
1064 int err; 1066 int err;
1065 1067
@@ -1071,7 +1073,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
1071 goto out; 1073 goto out;
1072 } 1074 }
1073 1075
1074 err = strict_strtol(strstrip(buffer), 0, &oom_adjust); 1076 err = kstrtoint(strstrip(buffer), 0, &oom_adjust);
1075 if (err) 1077 if (err)
1076 goto out; 1078 goto out;
1077 if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) && 1079 if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
@@ -1168,7 +1170,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
1168 struct task_struct *task; 1170 struct task_struct *task;
1169 char buffer[PROC_NUMBUF]; 1171 char buffer[PROC_NUMBUF];
1170 unsigned long flags; 1172 unsigned long flags;
1171 long oom_score_adj; 1173 int oom_score_adj;
1172 int err; 1174 int err;
1173 1175
1174 memset(buffer, 0, sizeof(buffer)); 1176 memset(buffer, 0, sizeof(buffer));
@@ -1179,7 +1181,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
1179 goto out; 1181 goto out;
1180 } 1182 }
1181 1183
1182 err = strict_strtol(strstrip(buffer), 0, &oom_score_adj); 1184 err = kstrtoint(strstrip(buffer), 0, &oom_score_adj);
1183 if (err) 1185 if (err)
1184 goto out; 1186 goto out;
1185 if (oom_score_adj < OOM_SCORE_ADJ_MIN || 1187 if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
@@ -1468,7 +1470,7 @@ sched_autogroup_write(struct file *file, const char __user *buf,
1468 struct inode *inode = file->f_path.dentry->d_inode; 1470 struct inode *inode = file->f_path.dentry->d_inode;
1469 struct task_struct *p; 1471 struct task_struct *p;
1470 char buffer[PROC_NUMBUF]; 1472 char buffer[PROC_NUMBUF];
1471 long nice; 1473 int nice;
1472 int err; 1474 int err;
1473 1475
1474 memset(buffer, 0, sizeof(buffer)); 1476 memset(buffer, 0, sizeof(buffer));
@@ -1477,9 +1479,9 @@ sched_autogroup_write(struct file *file, const char __user *buf,
1477 if (copy_from_user(buffer, buf, count)) 1479 if (copy_from_user(buffer, buf, count))
1478 return -EFAULT; 1480 return -EFAULT;
1479 1481
1480 err = strict_strtol(strstrip(buffer), 0, &nice); 1482 err = kstrtoint(strstrip(buffer), 0, &nice);
1481 if (err) 1483 if (err < 0)
1482 return -EINVAL; 1484 return err;
1483 1485
1484 p = get_proc_task(inode); 1486 p = get_proc_task(inode);
1485 if (!p) 1487 if (!p)
@@ -1576,57 +1578,6 @@ static const struct file_operations proc_pid_set_comm_operations = {
1576 .release = single_release, 1578 .release = single_release,
1577}; 1579};
1578 1580
1579/*
1580 * We added or removed a vma mapping the executable. The vmas are only mapped
1581 * during exec and are not mapped with the mmap system call.
1582 * Callers must hold down_write() on the mm's mmap_sem for these
1583 */
1584void added_exe_file_vma(struct mm_struct *mm)
1585{
1586 mm->num_exe_file_vmas++;
1587}
1588
1589void removed_exe_file_vma(struct mm_struct *mm)
1590{
1591 mm->num_exe_file_vmas--;
1592 if ((mm->num_exe_file_vmas == 0) && mm->exe_file){
1593 fput(mm->exe_file);
1594 mm->exe_file = NULL;
1595 }
1596
1597}
1598
1599void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
1600{
1601 if (new_exe_file)
1602 get_file(new_exe_file);
1603 if (mm->exe_file)
1604 fput(mm->exe_file);
1605 mm->exe_file = new_exe_file;
1606 mm->num_exe_file_vmas = 0;
1607}
1608
1609struct file *get_mm_exe_file(struct mm_struct *mm)
1610{
1611 struct file *exe_file;
1612
1613 /* We need mmap_sem to protect against races with removal of
1614 * VM_EXECUTABLE vmas */
1615 down_read(&mm->mmap_sem);
1616 exe_file = mm->exe_file;
1617 if (exe_file)
1618 get_file(exe_file);
1619 up_read(&mm->mmap_sem);
1620 return exe_file;
1621}
1622
1623void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
1624{
1625 /* It's safe to write the exe_file pointer without exe_file_lock because
1626 * this is called during fork when the task is not yet in /proc */
1627 newmm->exe_file = get_mm_exe_file(oldmm);
1628}
1629
1630static int proc_exe_link(struct inode *inode, struct path *exe_path) 1581static int proc_exe_link(struct inode *inode, struct path *exe_path)
1631{ 1582{
1632 struct task_struct *task; 1583 struct task_struct *task;
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 1cffa2b8a2fc..9758b654a1bc 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -138,9 +138,9 @@ static int stat_open(struct inode *inode, struct file *file)
138 struct seq_file *m; 138 struct seq_file *m;
139 int res; 139 int res;
140 140
141 /* don't ask for more than the kmalloc() max size, currently 128 KB */ 141 /* don't ask for more than the kmalloc() max size */
142 if (size > 128 * 1024) 142 if (size > KMALLOC_MAX_SIZE)
143 size = 128 * 1024; 143 size = KMALLOC_MAX_SIZE;
144 buf = kmalloc(size, GFP_KERNEL); 144 buf = kmalloc(size, GFP_KERNEL);
145 if (!buf) 145 if (!buf)
146 return -ENOMEM; 146 return -ENOMEM;
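The stat_open() change swaps the hard-coded 128 KB cap for KMALLOC_MAX_SIZE, so the clamp tracks the allocator's real limit rather than a number that can go stale. The same clamp in isolation, as a hedged sketch:

    /* Sketch: cap a caller-chosen buffer size at what kmalloc() can provide. */
    #include <linux/slab.h>

    static void *alloc_capped(size_t size)
    {
            if (size > KMALLOC_MAX_SIZE)
                    size = KMALLOC_MAX_SIZE;
            return kmalloc(size, GFP_KERNEL);
    }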
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index db15935fa757..25b6a887adb9 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -536,15 +536,17 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
536 char buffer[PROC_NUMBUF]; 536 char buffer[PROC_NUMBUF];
537 struct mm_struct *mm; 537 struct mm_struct *mm;
538 struct vm_area_struct *vma; 538 struct vm_area_struct *vma;
539 long type; 539 int type;
540 int rv;
540 541
541 memset(buffer, 0, sizeof(buffer)); 542 memset(buffer, 0, sizeof(buffer));
542 if (count > sizeof(buffer) - 1) 543 if (count > sizeof(buffer) - 1)
543 count = sizeof(buffer) - 1; 544 count = sizeof(buffer) - 1;
544 if (copy_from_user(buffer, buf, count)) 545 if (copy_from_user(buffer, buf, count))
545 return -EFAULT; 546 return -EFAULT;
546 if (strict_strtol(strstrip(buffer), 10, &type)) 547 rv = kstrtoint(strstrip(buffer), 10, &type);
547 return -EINVAL; 548 if (rv < 0)
549 return rv;
548 if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) 550 if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
549 return -EINVAL; 551 return -EINVAL;
550 task = get_proc_task(file->f_path.dentry->d_inode); 552 task = get_proc_task(file->f_path.dentry->d_inode);
@@ -769,18 +771,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
769 if (!task) 771 if (!task)
770 goto out; 772 goto out;
771 773
772 mm = mm_for_maps(task);
773 ret = PTR_ERR(mm);
774 if (!mm || IS_ERR(mm))
775 goto out_task;
776
777 ret = -EINVAL; 774 ret = -EINVAL;
778 /* file position must be aligned */ 775 /* file position must be aligned */
779 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) 776 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
780 goto out_task; 777 goto out_task;
781 778
782 ret = 0; 779 ret = 0;
783
784 if (!count) 780 if (!count)
785 goto out_task; 781 goto out_task;
786 782
@@ -788,7 +784,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
788 pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); 784 pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
789 ret = -ENOMEM; 785 ret = -ENOMEM;
790 if (!pm.buffer) 786 if (!pm.buffer)
791 goto out_mm; 787 goto out_task;
788
789 mm = mm_for_maps(task);
790 ret = PTR_ERR(mm);
791 if (!mm || IS_ERR(mm))
792 goto out_free;
792 793
793 pagemap_walk.pmd_entry = pagemap_pte_range; 794 pagemap_walk.pmd_entry = pagemap_pte_range;
794 pagemap_walk.pte_hole = pagemap_pte_hole; 795 pagemap_walk.pte_hole = pagemap_pte_hole;
@@ -831,7 +832,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
831 len = min(count, PM_ENTRY_BYTES * pm.pos); 832 len = min(count, PM_ENTRY_BYTES * pm.pos);
832 if (copy_to_user(buf, pm.buffer, len)) { 833 if (copy_to_user(buf, pm.buffer, len)) {
833 ret = -EFAULT; 834 ret = -EFAULT;
834 goto out_free; 835 goto out_mm;
835 } 836 }
836 copied += len; 837 copied += len;
837 buf += len; 838 buf += len;
@@ -841,10 +842,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
841 if (!ret || ret == PM_END_OF_BUFFER) 842 if (!ret || ret == PM_END_OF_BUFFER)
842 ret = copied; 843 ret = copied;
843 844
844out_free:
845 kfree(pm.buffer);
846out_mm: 845out_mm:
847 mmput(mm); 846 mmput(mm);
847out_free:
848 kfree(pm.buffer);
848out_task: 849out_task:
849 put_task_struct(task); 850 put_task_struct(task);
850out: 851out:
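The pagemap_read() hunks allocate the buffer before taking the mm reference and swap the out_free/out_mm labels to match, keeping the rule that unwind labels release resources in the reverse of acquisition order. A generic sketch of that idiom with hypothetical acquire/release helpers:

    /* Sketch: cleanup labels mirror acquisition order in reverse. */
    #include <linux/errno.h>

    extern void *acquire_a(void);
    extern void *acquire_b(void);
    extern int use(void *a, void *b);
    extern void release_a(void *a);
    extern void release_b(void *b);

    static int do_work(void)
    {
            void *a, *b;
            int ret;

            a = acquire_a();
            if (!a)
                    return -ENOMEM;

            b = acquire_b();               /* acquired second ... */
            if (!b) {
                    ret = -ENOMEM;
                    goto out_a;            /* ... so it is not released here */
            }

            ret = use(a, b);

            release_b(b);                  /* ... and released first */
    out_a:
            release_a(a);
            return ret;
    }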
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 74802bc5ded9..cd99bf557650 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -35,6 +35,46 @@ static u64 vmcore_size;
35 35
36static struct proc_dir_entry *proc_vmcore = NULL; 36static struct proc_dir_entry *proc_vmcore = NULL;
37 37
38/*
39 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
40 * The called function has to take care of module refcounting.
41 */
42static int (*oldmem_pfn_is_ram)(unsigned long pfn);
43
44int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
45{
46 if (oldmem_pfn_is_ram)
47 return -EBUSY;
48 oldmem_pfn_is_ram = fn;
49 return 0;
50}
51EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
52
53void unregister_oldmem_pfn_is_ram(void)
54{
55 oldmem_pfn_is_ram = NULL;
56 wmb();
57}
58EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
59
60static int pfn_is_ram(unsigned long pfn)
61{
62 int (*fn)(unsigned long pfn);
63 /* pfn is ram unless fn() checks pagetype */
64 int ret = 1;
65
66 /*
67 * Ask hypervisor if the pfn is really ram.
68 * A ballooned page contains no data and reading from such a page
69 * will cause high load in the hypervisor.
70 */
71 fn = oldmem_pfn_is_ram;
72 if (fn)
73 ret = fn(pfn);
74
75 return ret;
76}
77
38/* Reads a page from the oldmem device from given offset. */ 78/* Reads a page from the oldmem device from given offset. */
39static ssize_t read_from_oldmem(char *buf, size_t count, 79static ssize_t read_from_oldmem(char *buf, size_t count,
40 u64 *ppos, int userbuf) 80 u64 *ppos, int userbuf)
@@ -55,9 +95,15 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
55 else 95 else
56 nr_bytes = count; 96 nr_bytes = count;
57 97
58 tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf); 98 /* If pfn is not ram, return zeros for sparse dump files */
59 if (tmp < 0) 99 if (pfn_is_ram(pfn) == 0)
60 return tmp; 100 memset(buf, 0, nr_bytes);
101 else {
102 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
103 offset, userbuf);
104 if (tmp < 0)
105 return tmp;
106 }
61 *ppos += nr_bytes; 107 *ppos += nr_bytes;
62 count -= nr_bytes; 108 count -= nr_bytes;
63 buf += nr_bytes; 109 buf += nr_bytes;
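The new register_oldmem_pfn_is_ram() hook lets a hypervisor backend tell the vmcore reader which pfns are backed by real RAM, so ballooned pages come back as zeros instead of forcing an expensive copy_oldmem_page(). A hedged sketch of a backend module using the hook (the module and its check are made up):

    /* Sketch only: a backend plugging into the new oldmem hook. */
    #include <linux/module.h>
    #include <linux/crash_dump.h>

    static int my_pfn_is_ram(unsigned long pfn)
    {
            /* >0: RAM, 0: not RAM (e.g. ballooned), <0: error */
            return 1;                          /* placeholder decision */
    }

    static int __init my_init(void)
    {
            return register_oldmem_pfn_is_ram(my_pfn_is_ram);
    }

    static void __exit my_exit(void)
    {
            unregister_oldmem_pfn_is_ram();
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");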
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 8ab48bc2fa7d..ed0eb2a921f4 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 4b5a3fbb1f1f..f744be98cd5a 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -393,19 +393,36 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
393/* 393/*
394 * Read a filesystem table (uncompressed sequence of bytes) from disk 394 * Read a filesystem table (uncompressed sequence of bytes) from disk
395 */ 395 */
396int squashfs_read_table(struct super_block *sb, void *buffer, u64 block, 396void *squashfs_read_table(struct super_block *sb, u64 block, int length)
397 int length)
398{ 397{
399 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 398 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
400 int i, res; 399 int i, res;
401 void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL); 400 void *table, *buffer, **data;
402 if (data == NULL) 401
403 return -ENOMEM; 402 table = buffer = kmalloc(length, GFP_KERNEL);
403 if (table == NULL)
404 return ERR_PTR(-ENOMEM);
405
406 data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
407 if (data == NULL) {
408 res = -ENOMEM;
409 goto failed;
410 }
404 411
405 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) 412 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
406 data[i] = buffer; 413 data[i] = buffer;
414
407 res = squashfs_read_data(sb, data, block, length | 415 res = squashfs_read_data(sb, data, block, length |
408 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages); 416 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
417
409 kfree(data); 418 kfree(data);
410 return res; 419
420 if (res < 0)
421 goto failed;
422
423 return table;
424
425failed:
426 kfree(table);
427 return ERR_PTR(res);
411} 428}
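squashfs_read_table() now allocates the buffer itself and returns either the table or an ERR_PTR()-encoded error, rather than filling a caller-supplied buffer and returning an int. The calling convention this implies, sketched with a hypothetical wrapper:

    /* Sketch: ERR_PTR/IS_ERR convention expected by callers of the new interface. */
    #include <linux/err.h>
    #include <linux/fs.h>
    #include "squashfs.h"        /* internal header declaring squashfs_read_table() */

    static void *read_some_table(struct super_block *sb, u64 block, int length)
    {
            void *table = squashfs_read_table(sb, block, length);

            if (IS_ERR(table))
                    return table;            /* caller unpacks with PTR_ERR() */

            /* ...sanity-check contents here; kfree() + ERR_PTR() on failure... */
            return table;
    }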
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index e921bd213738..9f1b0bb96f13 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 099745ad5691..8ba70cff09a6 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -4,7 +4,7 @@
4 * Squashfs - a compressed read only filesystem for Linux 4 * Squashfs - a compressed read only filesystem for Linux
5 * 5 *
6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
7 * Phillip Lougher <phillip@lougher.demon.co.uk> 7 * Phillip Lougher <phillip@squashfs.org.uk>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index 3f79cd1d0c19..9dfe2ce0fb70 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
index 7f93d5a9ee05..730c56248c9b 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -121,30 +121,38 @@ static struct dentry *squashfs_get_parent(struct dentry *child)
121 * Read uncompressed inode lookup table indexes off disk into memory 121 * Read uncompressed inode lookup table indexes off disk into memory
122 */ 122 */
123__le64 *squashfs_read_inode_lookup_table(struct super_block *sb, 123__le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
124 u64 lookup_table_start, unsigned int inodes) 124 u64 lookup_table_start, u64 next_table, unsigned int inodes)
125{ 125{
126 unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); 126 unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
127 __le64 *inode_lookup_table; 127 __le64 *table;
128 int err;
129 128
130 TRACE("In read_inode_lookup_table, length %d\n", length); 129 TRACE("In read_inode_lookup_table, length %d\n", length);
131 130
132 /* Allocate inode lookup table indexes */ 131 /* Sanity check values */
133 inode_lookup_table = kmalloc(length, GFP_KERNEL); 132
134 if (inode_lookup_table == NULL) { 133 /* there should always be at least one inode */
135 ERROR("Failed to allocate inode lookup table\n"); 134 if (inodes == 0)
136 return ERR_PTR(-ENOMEM); 135 return ERR_PTR(-EINVAL);
137 } 136
137 /* length bytes should not extend into the next table - this check
138 * also traps instances where lookup_table_start is incorrectly larger
139 * than the next table start
140 */
141 if (lookup_table_start + length > next_table)
142 return ERR_PTR(-EINVAL);
143
144 table = squashfs_read_table(sb, lookup_table_start, length);
138 145
139 err = squashfs_read_table(sb, inode_lookup_table, lookup_table_start, 146 /*
140 length); 147 * table[0] points to the first inode lookup table metadata block,
141 if (err < 0) { 148 * this should be less than lookup_table_start
142 ERROR("unable to read inode lookup table\n"); 149 */
143 kfree(inode_lookup_table); 150 if (!IS_ERR(table) && table[0] >= lookup_table_start) {
144 return ERR_PTR(err); 151 kfree(table);
152 return ERR_PTR(-EINVAL);
145 } 153 }
146 154
147 return inode_lookup_table; 155 return table;
148} 156}
149 157
150 158
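Each index-table reader now rejects on-disk offsets that a corrupted image could use to read out of bounds: the table must end before the next table starts, and its first metadata-block pointer must lie below the table itself. The intent of those checks, reduced to a hedged sketch (the helper is invented, and it converts the little-endian entry before comparing):

    /* Sketch of the layout checks added to the squashfs table readers. */
    #include <linux/types.h>
    #include <asm/byteorder.h>

    static bool table_layout_ok(u64 table_start, unsigned int length,
                                u64 next_table, __le64 first_entry)
    {
            if (table_start + length > next_table)        /* runs into next table */
                    return false;
            if (le64_to_cpu(first_entry) >= table_start)  /* points past the index */
                    return false;
            return true;
    }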
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index a25c5060bdcb..38bb1c640559 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 7eef571443c6..1516a6490bfb 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -71,26 +71,29 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
71 * Read the uncompressed fragment lookup table indexes off disk into memory 71 * Read the uncompressed fragment lookup table indexes off disk into memory
72 */ 72 */
73__le64 *squashfs_read_fragment_index_table(struct super_block *sb, 73__le64 *squashfs_read_fragment_index_table(struct super_block *sb,
74 u64 fragment_table_start, unsigned int fragments) 74 u64 fragment_table_start, u64 next_table, unsigned int fragments)
75{ 75{
76 unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments); 76 unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments);
77 __le64 *fragment_index; 77 __le64 *table;
78 int err;
79 78
80 /* Allocate fragment lookup table indexes */ 79 /*
81 fragment_index = kmalloc(length, GFP_KERNEL); 80 * Sanity check, length bytes should not extend into the next table -
82 if (fragment_index == NULL) { 81 * this check also traps instances where fragment_table_start is
83 ERROR("Failed to allocate fragment index table\n"); 82 * incorrectly larger than the next table start
84 return ERR_PTR(-ENOMEM); 83 */
85 } 84 if (fragment_table_start + length > next_table)
85 return ERR_PTR(-EINVAL);
86
87 table = squashfs_read_table(sb, fragment_table_start, length);
86 88
87 err = squashfs_read_table(sb, fragment_index, fragment_table_start, 89 /*
88 length); 90 * table[0] points to the first fragment table metadata block, this
89 if (err < 0) { 91 * should be less than fragment_table_start
90 ERROR("unable to read fragment index table\n"); 92 */
91 kfree(fragment_index); 93 if (!IS_ERR(table) && table[0] >= fragment_table_start) {
92 return ERR_PTR(err); 94 kfree(table);
95 return ERR_PTR(-EINVAL);
93 } 96 }
94 97
95 return fragment_index; 98 return table;
96} 99}
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
index d8f32452638e..a70858e0fb44 100644
--- a/fs/squashfs/id.c
+++ b/fs/squashfs/id.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -66,27 +66,37 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
66 * Read uncompressed id lookup table indexes from disk into memory 66 * Read uncompressed id lookup table indexes from disk into memory
67 */ 67 */
68__le64 *squashfs_read_id_index_table(struct super_block *sb, 68__le64 *squashfs_read_id_index_table(struct super_block *sb,
69 u64 id_table_start, unsigned short no_ids) 69 u64 id_table_start, u64 next_table, unsigned short no_ids)
70{ 70{
71 unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); 71 unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
72 __le64 *id_table; 72 __le64 *table;
73 int err;
74 73
75 TRACE("In read_id_index_table, length %d\n", length); 74 TRACE("In read_id_index_table, length %d\n", length);
76 75
77 /* Allocate id lookup table indexes */ 76 /* Sanity check values */
78 id_table = kmalloc(length, GFP_KERNEL); 77
79 if (id_table == NULL) { 78 /* there should always be at least one id */
80 ERROR("Failed to allocate id index table\n"); 79 if (no_ids == 0)
81 return ERR_PTR(-ENOMEM); 80 return ERR_PTR(-EINVAL);
82 } 81
82 /*
83 * length bytes should not extend into the next table - this check
84 * also traps instances where id_table_start is incorrectly larger
85 * than the next table start
86 */
87 if (id_table_start + length > next_table)
88 return ERR_PTR(-EINVAL);
89
90 table = squashfs_read_table(sb, id_table_start, length);
83 91
84 err = squashfs_read_table(sb, id_table, id_table_start, length); 92 /*
85 if (err < 0) { 93 * table[0] points to the first id lookup table metadata block, this
86 ERROR("unable to read id index table\n"); 94 * should be less than id_table_start
87 kfree(id_table); 95 */
88 return ERR_PTR(err); 96 if (!IS_ERR(table) && table[0] >= id_table_start) {
97 kfree(table);
98 return ERR_PTR(-EINVAL);
89 } 99 }
90 100
91 return id_table; 101 return table;
92} 102}
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 62e63ad25075..04bebcaa2373 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 5d922a6701ab..4bc63ac64bc0 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 1f2e608b8785..e3be6a71cfa7 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -44,24 +44,24 @@ extern struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *,
44 u64, int); 44 u64, int);
45extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *, 45extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *,
46 u64, int); 46 u64, int);
47extern int squashfs_read_table(struct super_block *, void *, u64, int); 47extern void *squashfs_read_table(struct super_block *, u64, int);
48 48
49/* decompressor.c */ 49/* decompressor.c */
50extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int); 50extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
51extern void *squashfs_decompressor_init(struct super_block *, unsigned short); 51extern void *squashfs_decompressor_init(struct super_block *, unsigned short);
52 52
53/* export.c */ 53/* export.c */
54extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, 54extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64,
55 unsigned int); 55 unsigned int);
56 56
57/* fragment.c */ 57/* fragment.c */
58extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *); 58extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *);
59extern __le64 *squashfs_read_fragment_index_table(struct super_block *, 59extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
60 u64, unsigned int); 60 u64, u64, unsigned int);
61 61
62/* id.c */ 62/* id.c */
63extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); 63extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
64extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, 64extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, u64,
65 unsigned short); 65 unsigned short);
66 66
67/* inode.c */ 67/* inode.c */
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 4582c568ef4d..b4a4e539a08c 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -4,7 +4,7 @@
4 * Squashfs 4 * Squashfs
5 * 5 *
6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
7 * Phillip Lougher <phillip@lougher.demon.co.uk> 7 * Phillip Lougher <phillip@squashfs.org.uk>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
index 359baefc01fc..73588e7700ed 100644
--- a/fs/squashfs/squashfs_fs_i.h
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -4,7 +4,7 @@
4 * Squashfs 4 * Squashfs
5 * 5 *
6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
7 * Phillip Lougher <phillip@lougher.demon.co.uk> 7 * Phillip Lougher <phillip@squashfs.org.uk>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index d9037a5215f0..651f0b31d296 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -4,7 +4,7 @@
4 * Squashfs 4 * Squashfs
5 * 5 *
6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
7 * Phillip Lougher <phillip@lougher.demon.co.uk> 7 * Phillip Lougher <phillip@squashfs.org.uk>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 5c8184c061a4..6f26abee3597 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -83,7 +83,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
83 long long root_inode; 83 long long root_inode;
84 unsigned short flags; 84 unsigned short flags;
85 unsigned int fragments; 85 unsigned int fragments;
86 u64 lookup_table_start, xattr_id_table_start; 86 u64 lookup_table_start, xattr_id_table_start, next_table;
87 int err; 87 int err;
88 88
89 TRACE("Entered squashfs_fill_superblock\n"); 89 TRACE("Entered squashfs_fill_superblock\n");
@@ -95,12 +95,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
95 } 95 }
96 msblk = sb->s_fs_info; 96 msblk = sb->s_fs_info;
97 97
98 sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
99 if (sblk == NULL) {
100 ERROR("Failed to allocate squashfs_super_block\n");
101 goto failure;
102 }
103
104 msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE); 98 msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE);
105 msblk->devblksize_log2 = ffz(~msblk->devblksize); 99 msblk->devblksize_log2 = ffz(~msblk->devblksize);
106 100
@@ -114,10 +108,12 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
114 * of bytes_used) we need to set it to an initial sensible dummy value 108 * of bytes_used) we need to set it to an initial sensible dummy value
115 */ 109 */
116 msblk->bytes_used = sizeof(*sblk); 110 msblk->bytes_used = sizeof(*sblk);
117 err = squashfs_read_table(sb, sblk, SQUASHFS_START, sizeof(*sblk)); 111 sblk = squashfs_read_table(sb, SQUASHFS_START, sizeof(*sblk));
118 112
119 if (err < 0) { 113 if (IS_ERR(sblk)) {
120 ERROR("unable to read squashfs_super_block\n"); 114 ERROR("unable to read squashfs_super_block\n");
115 err = PTR_ERR(sblk);
116 sblk = NULL;
121 goto failed_mount; 117 goto failed_mount;
122 } 118 }
123 119
@@ -218,18 +214,61 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
218 goto failed_mount; 214 goto failed_mount;
219 } 215 }
220 216
217 /* Handle xattrs */
218 sb->s_xattr = squashfs_xattr_handlers;
219 xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
220 if (xattr_id_table_start == SQUASHFS_INVALID_BLK) {
221 next_table = msblk->bytes_used;
222 goto allocate_id_index_table;
223 }
224
225 /* Allocate and read xattr id lookup table */
226 msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
227 xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
228 if (IS_ERR(msblk->xattr_id_table)) {
229 ERROR("unable to read xattr id index table\n");
230 err = PTR_ERR(msblk->xattr_id_table);
231 msblk->xattr_id_table = NULL;
232 if (err != -ENOTSUPP)
233 goto failed_mount;
234 }
235 next_table = msblk->xattr_table;
236
237allocate_id_index_table:
221 /* Allocate and read id index table */ 238 /* Allocate and read id index table */
222 msblk->id_table = squashfs_read_id_index_table(sb, 239 msblk->id_table = squashfs_read_id_index_table(sb,
223 le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids)); 240 le64_to_cpu(sblk->id_table_start), next_table,
241 le16_to_cpu(sblk->no_ids));
224 if (IS_ERR(msblk->id_table)) { 242 if (IS_ERR(msblk->id_table)) {
243 ERROR("unable to read id index table\n");
225 err = PTR_ERR(msblk->id_table); 244 err = PTR_ERR(msblk->id_table);
226 msblk->id_table = NULL; 245 msblk->id_table = NULL;
227 goto failed_mount; 246 goto failed_mount;
228 } 247 }
248 next_table = msblk->id_table[0];
249
250 /* Handle inode lookup table */
251 lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
252 if (lookup_table_start == SQUASHFS_INVALID_BLK)
253 goto handle_fragments;
254
255 /* Allocate and read inode lookup table */
256 msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
257 lookup_table_start, next_table, msblk->inodes);
258 if (IS_ERR(msblk->inode_lookup_table)) {
259 ERROR("unable to read inode lookup table\n");
260 err = PTR_ERR(msblk->inode_lookup_table);
261 msblk->inode_lookup_table = NULL;
262 goto failed_mount;
263 }
264 next_table = msblk->inode_lookup_table[0];
229 265
266 sb->s_export_op = &squashfs_export_ops;
267
268handle_fragments:
230 fragments = le32_to_cpu(sblk->fragments); 269 fragments = le32_to_cpu(sblk->fragments);
231 if (fragments == 0) 270 if (fragments == 0)
232 goto allocate_lookup_table; 271 goto check_directory_table;
233 272
234 msblk->fragment_cache = squashfs_cache_init("fragment", 273 msblk->fragment_cache = squashfs_cache_init("fragment",
235 SQUASHFS_CACHED_FRAGMENTS, msblk->block_size); 274 SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
@@ -240,45 +279,29 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
240 279
241 /* Allocate and read fragment index table */ 280 /* Allocate and read fragment index table */
242 msblk->fragment_index = squashfs_read_fragment_index_table(sb, 281 msblk->fragment_index = squashfs_read_fragment_index_table(sb,
243 le64_to_cpu(sblk->fragment_table_start), fragments); 282 le64_to_cpu(sblk->fragment_table_start), next_table, fragments);
244 if (IS_ERR(msblk->fragment_index)) { 283 if (IS_ERR(msblk->fragment_index)) {
284 ERROR("unable to read fragment index table\n");
245 err = PTR_ERR(msblk->fragment_index); 285 err = PTR_ERR(msblk->fragment_index);
246 msblk->fragment_index = NULL; 286 msblk->fragment_index = NULL;
247 goto failed_mount; 287 goto failed_mount;
248 } 288 }
289 next_table = msblk->fragment_index[0];
249 290
250allocate_lookup_table: 291check_directory_table:
251 lookup_table_start = le64_to_cpu(sblk->lookup_table_start); 292 /* Sanity check directory_table */
252 if (lookup_table_start == SQUASHFS_INVALID_BLK) 293 if (msblk->directory_table >= next_table) {
253 goto allocate_xattr_table; 294 err = -EINVAL;
254
255 /* Allocate and read inode lookup table */
256 msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
257 lookup_table_start, msblk->inodes);
258 if (IS_ERR(msblk->inode_lookup_table)) {
259 err = PTR_ERR(msblk->inode_lookup_table);
260 msblk->inode_lookup_table = NULL;
261 goto failed_mount; 295 goto failed_mount;
262 } 296 }
263 297
264 sb->s_export_op = &squashfs_export_ops; 298 /* Sanity check inode_table */
265 299 if (msblk->inode_table >= msblk->directory_table) {
266allocate_xattr_table: 300 err = -EINVAL;
267 sb->s_xattr = squashfs_xattr_handlers; 301 goto failed_mount;
268 xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
269 if (xattr_id_table_start == SQUASHFS_INVALID_BLK)
270 goto allocate_root;
271
272 /* Allocate and read xattr id lookup table */
273 msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
274 xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
275 if (IS_ERR(msblk->xattr_id_table)) {
276 err = PTR_ERR(msblk->xattr_id_table);
277 msblk->xattr_id_table = NULL;
278 if (err != -ENOTSUPP)
279 goto failed_mount;
280 } 302 }
281allocate_root: 303
304 /* allocate root */
282 root = new_inode(sb); 305 root = new_inode(sb);
283 if (!root) { 306 if (!root) {
284 err = -ENOMEM; 307 err = -ENOMEM;
@@ -318,11 +341,6 @@ failed_mount:
318 sb->s_fs_info = NULL; 341 sb->s_fs_info = NULL;
319 kfree(sblk); 342 kfree(sblk);
320 return err; 343 return err;
321
322failure:
323 kfree(sb->s_fs_info);
324 sb->s_fs_info = NULL;
325 return -ENOMEM;
326} 344}
327 345
328 346
@@ -475,5 +493,5 @@ static const struct super_operations squashfs_super_ops = {
475module_init(init_squashfs_fs); 493module_init(init_squashfs_fs);
476module_exit(exit_squashfs_fs); 494module_exit(exit_squashfs_fs);
477MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem"); 495MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem");
478MODULE_AUTHOR("Phillip Lougher <phillip@lougher.demon.co.uk>"); 496MODULE_AUTHOR("Phillip Lougher <phillip@squashfs.org.uk>");
479MODULE_LICENSE("GPL"); 497MODULE_LICENSE("GPL");
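squashfs_fill_super() now reads the index tables in reverse on-disk order (xattr ids, then ids, then the inode lookup table, then fragments), threading next_table along so every table can be validated against the start of the one that follows it, and finishes by bounds-checking directory_table and inode_table. The chained check, sketched in isolation with an invented helper:

    /* Sketch: each start offset must lie strictly below the table after it. */
    #include <linux/types.h>

    static bool chain_ok(const u64 *starts, int n, u64 bytes_used)
    {
            u64 upper = bytes_used;      /* everything must fit inside the image */
            int i;

            for (i = 0; i < n; i++) {    /* starts[] ordered last table first */
                    if (starts[i] >= upper)
                            return false;
                    upper = starts[i];
            }
            return true;
    }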
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index ec86434921e1..1191817264cc 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
index 3876c36699a1..92fcde7b4d61 100644
--- a/fs/squashfs/xattr.c
+++ b/fs/squashfs/xattr.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2010 4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
index b634efce4bde..c83f5d9ec125 100644
--- a/fs/squashfs/xattr.h
+++ b/fs/squashfs/xattr.h
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2010 4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -31,6 +31,7 @@ static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
31 u64 start, u64 *xattr_table_start, int *xattr_ids) 31 u64 start, u64 *xattr_table_start, int *xattr_ids)
32{ 32{
33 ERROR("Xattrs in filesystem, these will be ignored\n"); 33 ERROR("Xattrs in filesystem, these will be ignored\n");
34 *xattr_table_start = start;
34 return ERR_PTR(-ENOTSUPP); 35 return ERR_PTR(-ENOTSUPP);
35} 36}
36 37
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
index 05385dbe1465..c89607d690c4 100644
--- a/fs/squashfs/xattr_id.c
+++ b/fs/squashfs/xattr_id.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2010 4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -67,34 +67,29 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
67 u64 *xattr_table_start, int *xattr_ids) 67 u64 *xattr_table_start, int *xattr_ids)
68{ 68{
69 unsigned int len; 69 unsigned int len;
70 __le64 *xid_table; 70 struct squashfs_xattr_id_table *id_table;
71 struct squashfs_xattr_id_table id_table; 71
72 int err; 72 id_table = squashfs_read_table(sb, start, sizeof(*id_table));
73 if (IS_ERR(id_table))
74 return (__le64 *) id_table;
75
76 *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
77 *xattr_ids = le32_to_cpu(id_table->xattr_ids);
78 kfree(id_table);
79
80 /* Sanity check values */
81
82 /* there is always at least one xattr id */
83 if (*xattr_ids == 0)
84 return ERR_PTR(-EINVAL);
85
86 /* xattr_table should be less than start */
87 if (*xattr_table_start >= start)
88 return ERR_PTR(-EINVAL);
73 89
74 err = squashfs_read_table(sb, &id_table, start, sizeof(id_table));
75 if (err < 0) {
76 ERROR("unable to read xattr id table\n");
77 return ERR_PTR(err);
78 }
79 *xattr_table_start = le64_to_cpu(id_table.xattr_table_start);
80 *xattr_ids = le32_to_cpu(id_table.xattr_ids);
81 len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); 90 len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
82 91
83 TRACE("In read_xattr_index_table, length %d\n", len); 92 TRACE("In read_xattr_index_table, length %d\n", len);
84 93
85 /* Allocate xattr id lookup table indexes */ 94 return squashfs_read_table(sb, start + sizeof(*id_table), len);
86 xid_table = kmalloc(len, GFP_KERNEL);
87 if (xid_table == NULL) {
88 ERROR("Failed to allocate xattr id index table\n");
89 return ERR_PTR(-ENOMEM);
90 }
91
92 err = squashfs_read_table(sb, xid_table, start + sizeof(id_table), len);
93 if (err < 0) {
94 ERROR("unable to read xattr id index table\n");
95 kfree(xid_table);
96 return ERR_PTR(err);
97 }
98
99 return xid_table;
100} 95}
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index aa47a286d1f8..1760b7d108f6 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 517688b32ffa..55d918fd2d86 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 46f7a807bbc1..42694e11c23d 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -424,8 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
424 ufs_cpu_to_data_ptr(sb, p, result); 424 ufs_cpu_to_data_ptr(sb, p, result);
425 *err = 0; 425 *err = 0;
426 UFS_I(inode)->i_lastfrag = 426 UFS_I(inode)->i_lastfrag =
427 max_t(u32, UFS_I(inode)->i_lastfrag, 427 max(UFS_I(inode)->i_lastfrag, fragment + count);
428 fragment + count);
429 ufs_clear_frags(inode, result + oldcount, 428 ufs_clear_frags(inode, result + oldcount,
430 newcount - oldcount, locked_page != NULL); 429 newcount - oldcount, locked_page != NULL);
431 } 430 }
@@ -440,7 +439,8 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
440 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err); 439 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
441 if (result) { 440 if (result) {
442 *err = 0; 441 *err = 0;
443 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 442 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
443 fragment + count);
444 ufs_clear_frags(inode, result + oldcount, newcount - oldcount, 444 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
445 locked_page != NULL); 445 locked_page != NULL);
446 unlock_super(sb); 446 unlock_super(sb);
@@ -479,7 +479,8 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
479 uspi->s_sbbase + result, locked_page); 479 uspi->s_sbbase + result, locked_page);
480 ufs_cpu_to_data_ptr(sb, p, result); 480 ufs_cpu_to_data_ptr(sb, p, result);
481 *err = 0; 481 *err = 0;
482 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 482 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
483 fragment + count);
483 unlock_super(sb); 484 unlock_super(sb);
484 if (newcount < request) 485 if (newcount < request)
485 ufs_free_fragments (inode, result + newcount, request - newcount); 486 ufs_free_fragments (inode, result + newcount, request - newcount);
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 5f821dbc0579..f04f89fbd4d9 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -84,7 +84,7 @@ static int ufs_trunc_direct(struct inode *inode)
84 retry = 0; 84 retry = 0;
85 85
86 frag1 = DIRECT_FRAGMENT; 86 frag1 = DIRECT_FRAGMENT;
87 frag4 = min_t(u32, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag); 87 frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
88 frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1); 88 frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
89 frag3 = frag4 & ~uspi->s_fpbmask; 89 frag3 = frag4 & ~uspi->s_fpbmask;
90 block1 = block2 = 0; 90 block1 = block2 = 0;
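The fragment arithmetic here is 64-bit (fragment and count are u64-sized quantities), so max_t(u32, ...) truncated the values before comparing; plain max() in ufs_new_fragments() and min_t(u64, ...) in ufs_trunc_direct() keep the full width. A small user-space illustration of the truncation hazard:

    /* Sketch: why forcing a 64-bit comparison through a 32-bit type is wrong. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t lastfrag = 0x100000005ULL;   /* hypothetical large value */
            uint64_t candidate = 7;

            uint32_t truncated = (uint32_t)lastfrag;                          /* 5 */
            uint64_t wrong = truncated > candidate ? truncated : candidate;   /* 7 */
            uint64_t right = lastfrag > candidate ? lastfrag : candidate;     /* 0x100000005 */

            printf("wrong=%llu right=%llu\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }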
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 110fa700f853..71c778033f57 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_GENERIC_BITOPS_FIND_H_ 1#ifndef _ASM_GENERIC_BITOPS_FIND_H_
2#define _ASM_GENERIC_BITOPS_FIND_H_ 2#define _ASM_GENERIC_BITOPS_FIND_H_
3 3
4#ifndef find_next_bit
4/** 5/**
5 * find_next_bit - find the next set bit in a memory region 6 * find_next_bit - find the next set bit in a memory region
6 * @addr: The address to base the search on 7 * @addr: The address to base the search on
@@ -9,7 +10,9 @@
9 */ 10 */
10extern unsigned long find_next_bit(const unsigned long *addr, unsigned long 11extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
11 size, unsigned long offset); 12 size, unsigned long offset);
13#endif
12 14
15#ifndef find_next_zero_bit
13/** 16/**
14 * find_next_zero_bit - find the next cleared bit in a memory region 17 * find_next_zero_bit - find the next cleared bit in a memory region
15 * @addr: The address to base the search on 18 * @addr: The address to base the search on
@@ -18,6 +21,7 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
18 */ 21 */
19extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned 22extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
20 long size, unsigned long offset); 23 long size, unsigned long offset);
24#endif
21 25
22#ifdef CONFIG_GENERIC_FIND_FIRST_BIT 26#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
23 27
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 946a21b1b5dc..f95c663a6a41 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -30,13 +30,20 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
30 30
31#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) 31#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
32 32
33#ifndef find_next_zero_bit_le
33extern unsigned long find_next_zero_bit_le(const void *addr, 34extern unsigned long find_next_zero_bit_le(const void *addr,
34 unsigned long size, unsigned long offset); 35 unsigned long size, unsigned long offset);
36#endif
37
38#ifndef find_next_bit_le
35extern unsigned long find_next_bit_le(const void *addr, 39extern unsigned long find_next_bit_le(const void *addr,
36 unsigned long size, unsigned long offset); 40 unsigned long size, unsigned long offset);
41#endif
37 42
43#ifndef find_first_zero_bit_le
38#define find_first_zero_bit_le(addr, size) \ 44#define find_first_zero_bit_le(addr, size) \
39 find_next_zero_bit_le((addr), (size), 0) 45 find_next_zero_bit_le((addr), (size), 0)
46#endif
40 47
41#else 48#else
42#error "Please fix <asm/byteorder.h>" 49#error "Please fix <asm/byteorder.h>"
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 91784841e407..dfb0ec666c94 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -162,46 +162,6 @@ extern void warn_slowpath_null(const char *file, const int line);
162 unlikely(__ret_warn_once); \ 162 unlikely(__ret_warn_once); \
163}) 163})
164 164
165#ifdef CONFIG_PRINTK
166
167#define WARN_ON_RATELIMIT(condition, state) \
168 WARN_ON((condition) && __ratelimit(state))
169
170#define __WARN_RATELIMIT(condition, state, format...) \
171({ \
172 int rtn = 0; \
173 if (unlikely(__ratelimit(state))) \
174 rtn = WARN(condition, format); \
175 rtn; \
176})
177
178#define WARN_RATELIMIT(condition, format...) \
179({ \
180 static DEFINE_RATELIMIT_STATE(_rs, \
181 DEFAULT_RATELIMIT_INTERVAL, \
182 DEFAULT_RATELIMIT_BURST); \
183 __WARN_RATELIMIT(condition, &_rs, format); \
184})
185
186#else
187
188#define WARN_ON_RATELIMIT(condition, state) \
189 WARN_ON(condition)
190
191#define __WARN_RATELIMIT(condition, state, format...) \
192({ \
193 int rtn = WARN(condition, format); \
194 rtn; \
195})
196
197#define WARN_RATELIMIT(condition, format...) \
198({ \
199 int rtn = WARN(condition, format); \
200 rtn; \
201})
202
203#endif
204
205/* 165/*
206 * WARN_ON_SMP() is for cases that the warning is either 166 * WARN_ON_SMP() is for cases that the warning is either
207 * meaningless for !SMP or may even cause failures. 167 * meaningless for !SMP or may even cause failures.
diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h
new file mode 100644
index 000000000000..82e674f6b337
--- /dev/null
+++ b/include/asm-generic/ptrace.h
@@ -0,0 +1,74 @@
1/*
2 * Common low level (register) ptrace helpers
3 *
4 * Copyright 2004-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __ASM_GENERIC_PTRACE_H__
10#define __ASM_GENERIC_PTRACE_H__
11
12#ifndef __ASSEMBLY__
13
14/* Helpers for working with the instruction pointer */
15#ifndef GET_IP
16#define GET_IP(regs) ((regs)->pc)
17#endif
18#ifndef SET_IP
19#define SET_IP(regs, val) (GET_IP(regs) = (val))
20#endif
21
22static inline unsigned long instruction_pointer(struct pt_regs *regs)
23{
24 return GET_IP(regs);
25}
26static inline void instruction_pointer_set(struct pt_regs *regs,
27 unsigned long val)
28{
29 SET_IP(regs, val);
30}
31
32#ifndef profile_pc
33#define profile_pc(regs) instruction_pointer(regs)
34#endif
35
36/* Helpers for working with the user stack pointer */
37#ifndef GET_USP
38#define GET_USP(regs) ((regs)->usp)
39#endif
40#ifndef SET_USP
41#define SET_USP(regs, val) (GET_USP(regs) = (val))
42#endif
43
44static inline unsigned long user_stack_pointer(struct pt_regs *regs)
45{
46 return GET_USP(regs);
47}
48static inline void user_stack_pointer_set(struct pt_regs *regs,
49 unsigned long val)
50{
51 SET_USP(regs, val);
52}
53
54/* Helpers for working with the frame pointer */
55#ifndef GET_FP
56#define GET_FP(regs) ((regs)->fp)
57#endif
58#ifndef SET_FP
59#define SET_FP(regs, val) (GET_FP(regs) = (val))
60#endif
61
62static inline unsigned long frame_pointer(struct pt_regs *regs)
63{
64 return GET_FP(regs);
65}
66static inline void frame_pointer_set(struct pt_regs *regs,
67 unsigned long val)
68{
69 SET_FP(regs, val);
70}
71
72#endif /* __ASSEMBLY__ */
73
74#endif
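The new header builds instruction_pointer(), user_stack_pointer() and frame_pointer() out of overridable GET_*/SET_* macros, so an architecture only defines the accessors whose register names differ from the defaults. A hedged sketch of an arch ptrace.h using it (the register layout is invented):

    /* Sketch: arch/foo/include/asm/ptrace.h built on asm-generic/ptrace.h. */
    struct pt_regs {
            unsigned long pc;                /* matches the GET_IP() default */
            unsigned long sp;                /* differs from the generic 'usp' */
            unsigned long fp;
    };

    #define GET_USP(regs)           ((regs)->sp)        /* override just this pair */
    #define SET_USP(regs, val)      ((regs)->sp = (val))

    #include <asm-generic/ptrace.h>          /* supplies the inline helpers */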
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 33d524704883..ae90e0f63995 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -681,9 +681,11 @@ __SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
681__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime) 681__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
682#define __NR_syncfs 267 682#define __NR_syncfs 267
683__SYSCALL(__NR_syncfs, sys_syncfs) 683__SYSCALL(__NR_syncfs, sys_syncfs)
684#define __NR_setns 268
685__SYSCALL(__NR_setns, sys_setns)
684 686
685#undef __NR_syscalls 687#undef __NR_syscalls
686#define __NR_syscalls 268 688#define __NR_syscalls 269
687 689
688/* 690/*
689 * All syscalls below here should go away really, 691 * All syscalls below here should go away really,
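With __NR_setns hooked up as syscall 268 in the asm-generic table, userspace can join an existing namespace through an fd from /proc/<pid>/ns/. A hedged user-space sketch using the raw syscall (SYS_setns needs headers that already know the number; substitute the number for your architecture otherwise):

    /* Sketch: join another process's network namespace via setns. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            int fd = open(argc > 1 ? argv[1] : "/proc/1/ns/net", O_RDONLY);

            if (fd < 0 || syscall(SYS_setns, fd, 0) < 0) {   /* 0 = any ns type */
                    perror("setns");
                    return 1;
            }
            printf("joined namespace\n");
            return 0;
    }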
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 2184c6b97aeb..a3ef66a2a083 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -148,7 +148,7 @@ static inline unsigned long __ffs64(u64 word)
148 148
149#ifdef __KERNEL__ 149#ifdef __KERNEL__
150 150
151#ifdef CONFIG_GENERIC_FIND_LAST_BIT 151#ifndef find_last_bit
152/** 152/**
153 * find_last_bit - find the last set bit in a memory region 153 * find_last_bit - find the last set bit in a memory region
154 * @addr: The address to start the search at 154 * @addr: The address to start the search at
@@ -158,7 +158,7 @@ static inline unsigned long __ffs64(u64 word)
158 */ 158 */
159extern unsigned long find_last_bit(const unsigned long *addr, 159extern unsigned long find_last_bit(const unsigned long *addr,
160 unsigned long size); 160 unsigned long size);
161#endif /* CONFIG_GENERIC_FIND_LAST_BIT */ 161#endif
162 162
163#endif /* __KERNEL__ */ 163#endif /* __KERNEL__ */
164#endif 164#endif
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5ac7ebc36dbb..ab4ac0ccb857 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -467,12 +467,14 @@ struct cgroup_subsys {
467 int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); 467 int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
468 void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); 468 void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
469 int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, 469 int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
470 struct task_struct *tsk, bool threadgroup); 470 struct task_struct *tsk);
471 int (*can_attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
471 void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, 472 void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
472 struct task_struct *tsk, bool threadgroup); 473 struct task_struct *tsk);
474 void (*pre_attach)(struct cgroup *cgrp);
475 void (*attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
473 void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, 476 void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
474 struct cgroup *old_cgrp, struct task_struct *tsk, 477 struct cgroup *old_cgrp, struct task_struct *tsk);
475 bool threadgroup);
476 void (*fork)(struct cgroup_subsys *ss, struct task_struct *task); 478 void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
477 void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp, 479 void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
478 struct cgroup *old_cgrp, struct task_struct *task); 480 struct cgroup *old_cgrp, struct task_struct *task);
@@ -553,9 +555,6 @@ static inline struct cgroup* task_cgroup(struct task_struct *task,
553 return task_subsys_state(task, subsys_id)->cgroup; 555 return task_subsys_state(task, subsys_id)->cgroup;
554} 556}
555 557
556int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss,
557 char *nodename);
558
559/* A cgroup_iter should be treated as an opaque object */ 558/* A cgroup_iter should be treated as an opaque object */
560struct cgroup_iter { 559struct cgroup_iter {
561 struct list_head *cg_link; 560 struct list_head *cg_link;
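The cgroup_subsys callbacks lose the threadgroup flag and gain per-task hooks (can_attach_task, pre_attach, attach_task), so a controller is called once per thread during a whole-threadgroup move instead of open-coding the iteration. A hedged sketch of a controller wired to the reworked ops (the controller is made up and omits the usual subsys-id boilerplate):

    /* Sketch only: a toy controller against the new per-task callbacks. */
    #include <linux/cgroup.h>

    static int toy_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
    {
            return 0;                          /* allow every task */
    }

    static void toy_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
    {
            /* per-task migration/accounting work would go here */
    }

    struct cgroup_subsys toy_subsys = {
            .name            = "toy",
            .can_attach_task = toy_can_attach_task,
            .attach_task     = toy_attach_task,
            /* whole-group hooks (.can_attach, .attach) are still available */
    };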
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index cdbfcb8780ec..ac663c18776c 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -19,12 +19,6 @@ SUBSYS(debug)
19 19
20/* */ 20/* */
21 21
22#ifdef CONFIG_CGROUP_NS
23SUBSYS(ns)
24#endif
25
26/* */
27
28#ifdef CONFIG_CGROUP_SCHED 22#ifdef CONFIG_CGROUP_SCHED
29SUBSYS(cpu_cgroup) 23SUBSYS(cpu_cgroup)
30#endif 24#endif
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 088cd4ace4ef..74054074e876 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -66,6 +66,11 @@ static inline void vmcore_unusable(void)
66 if (is_kdump_kernel()) 66 if (is_kdump_kernel())
67 elfcorehdr_addr = ELFCORE_ADDR_ERR; 67 elfcorehdr_addr = ELFCORE_ADDR_ERR;
68} 68}
69
70#define HAVE_OLDMEM_PFN_IS_RAM 1
71extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
72extern void unregister_oldmem_pfn_is_ram(void);
73
69#else /* !CONFIG_CRASH_DUMP */ 74#else /* !CONFIG_CRASH_DUMP */
70static inline int is_kdump_kernel(void) { return 0; } 75static inline int is_kdump_kernel(void) { return 0; }
71#endif /* CONFIG_CRASH_DUMP */ 76#endif /* CONFIG_CRASH_DUMP */
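The crash_dump.h addition lets a driver tell the vmcore reader which old-memory pfns are actually backed by RAM. A sketch of a registering module; the backend policy here is a placeholder, not something defined by this patch:

    #include <linux/crash_dump.h>
    #include <linux/module.h>

    /* Placeholder policy: 1 = pfn is readable RAM, 0 = have vmcore skip it. */
    static int demo_oldmem_pfn_is_ram(unsigned long pfn)
    {
            return 1;
    }

    static int __init demo_init(void)
    {
            return register_oldmem_pfn_is_ram(demo_oldmem_pfn_is_ram);
    }

    static void __exit demo_exit(void)
    {
            unregister_oldmem_pfn_is_ram();
    }

    module_init(demo_init);
    module_exit(demo_exit);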
diff --git a/include/linux/cred.h b/include/linux/cred.h
index be16b61283cc..82607992f308 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -1,4 +1,4 @@
1/* Credentials management - see Documentation/credentials.txt 1/* Credentials management - see Documentation/security/credentials.txt
2 * 2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
index ebeb2f3ad068..6843cf193a44 100644
--- a/include/linux/flex_array.h
+++ b/include/linux/flex_array.h
@@ -21,6 +21,8 @@ struct flex_array {
21 struct { 21 struct {
22 int element_size; 22 int element_size;
23 int total_nr_elements; 23 int total_nr_elements;
24 int elems_per_part;
25 u32 reciprocal_elems;
24 struct flex_array_part *parts[]; 26 struct flex_array_part *parts[];
25 }; 27 };
26 /* 28 /*
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 0f1325d98295..0065ffd3226b 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -132,10 +132,6 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
132 132
133int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); 133int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
134 134
135#ifdef CONFIG_SYSCTL
136extern struct ctl_table ether_table[];
137#endif
138
139int mac_pton(const char *s, u8 *mac); 135int mac_pton(const char *s, u8 *mac);
140extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); 136extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
141 137
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index bafc58c00fc3..580f70c02391 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -22,6 +22,14 @@
22extern struct files_struct init_files; 22extern struct files_struct init_files;
23extern struct fs_struct init_fs; 23extern struct fs_struct init_fs;
24 24
25#ifdef CONFIG_CGROUPS
26#define INIT_THREADGROUP_FORK_LOCK(sig) \
27 .threadgroup_fork_lock = \
28 __RWSEM_INITIALIZER(sig.threadgroup_fork_lock),
29#else
30#define INIT_THREADGROUP_FORK_LOCK(sig)
31#endif
32
25#define INIT_SIGNALS(sig) { \ 33#define INIT_SIGNALS(sig) { \
26 .nr_threads = 1, \ 34 .nr_threads = 1, \
27 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ 35 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
@@ -38,6 +46,7 @@ extern struct fs_struct init_fs;
38 }, \ 46 }, \
39 .cred_guard_mutex = \ 47 .cred_guard_mutex = \
40 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ 48 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
49 INIT_THREADGROUP_FORK_LOCK(sig) \
41} 50}
42 51
43extern struct nsproxy init_nsproxy; 52extern struct nsproxy init_nsproxy;
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 906590aa6907..204f9cd26c16 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -236,7 +236,7 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
236 directory for this interface. Note that the entry will 236 directory for this interface. Note that the entry will
237 automatically be destroyed when the interface is destroyed. */ 237 automatically be destroyed when the interface is destroyed. */
238int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, 238int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
239 read_proc_t *read_proc, 239 const struct file_operations *proc_ops,
240 void *data); 240 void *data);
241 241
242#endif /* __LINUX_IPMI_SMI_H */ 242#endif /* __LINUX_IPMI_SMI_H */
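ipmi_smi_add_proc_entry() now takes a const struct file_operations instead of a read_proc_t, so callers supply a seq_file-based ops table. A sketch of how a converted caller might look on a kernel of this generation (the entry name and show routine are illustrative):

    #include <linux/seq_file.h>
    #include <linux/proc_fs.h>
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/ipmi_smi.h>

    static int demo_stats_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "demo statistics would be printed here\n");
            return 0;
    }

    static int demo_stats_open(struct inode *inode, struct file *file)
    {
            return single_open(file, demo_stats_show, PDE(inode)->data);
    }

    static const struct file_operations demo_stats_proc_ops = {
            .owner   = THIS_MODULE,
            .open    = demo_stats_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int demo_register_proc(ipmi_smi_t intf, void *data)
    {
            return ipmi_smi_add_proc_entry(intf, "demo_stats",
                                           &demo_stats_proc_ops, data);
    }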
diff --git a/include/linux/key.h b/include/linux/key.h
index ef19b99aff98..6ea4eebd3467 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * 11 *
12 * See Documentation/keys.txt for information on keys/keyrings. 12 * See Documentation/security/keys.txt for information on keys/keyrings.
13 */ 13 */
14 14
15#ifndef _LINUX_KEY_H 15#ifndef _LINUX_KEY_H
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5e9840f50980..9724a38ee69d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -20,6 +20,8 @@
20#ifndef _LINUX_MEMCONTROL_H 20#ifndef _LINUX_MEMCONTROL_H
21#define _LINUX_MEMCONTROL_H 21#define _LINUX_MEMCONTROL_H
22#include <linux/cgroup.h> 22#include <linux/cgroup.h>
23#include <linux/vm_event_item.h>
24
23struct mem_cgroup; 25struct mem_cgroup;
24struct page_cgroup; 26struct page_cgroup;
25struct page; 27struct page;
@@ -106,9 +108,10 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
106 */ 108 */
107int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); 109int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
108int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg); 110int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
109unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, 111int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
110 struct zone *zone, 112unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
111 enum lru_list lru); 113 struct zone *zone,
114 enum lru_list lru);
112struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, 115struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
113 struct zone *zone); 116 struct zone *zone);
114struct zone_reclaim_stat* 117struct zone_reclaim_stat*
@@ -144,9 +147,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
144} 147}
145 148
146unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 149unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
147 gfp_t gfp_mask); 150 gfp_t gfp_mask,
151 unsigned long *total_scanned);
148u64 mem_cgroup_get_limit(struct mem_cgroup *mem); 152u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
149 153
154void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
150#ifdef CONFIG_TRANSPARENT_HUGEPAGE 155#ifdef CONFIG_TRANSPARENT_HUGEPAGE
151void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail); 156void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
152#endif 157#endif
@@ -302,8 +307,8 @@ mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
302} 307}
303 308
304static inline unsigned long 309static inline unsigned long
305mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, 310mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, struct zone *zone,
306 enum lru_list lru) 311 enum lru_list lru)
307{ 312{
308 return 0; 313 return 0;
309} 314}
@@ -338,7 +343,8 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
338 343
339static inline 344static inline
340unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 345unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
341 gfp_t gfp_mask) 346 gfp_t gfp_mask,
347 unsigned long *total_scanned)
342{ 348{
343 return 0; 349 return 0;
344} 350}
@@ -354,6 +360,10 @@ static inline void mem_cgroup_split_huge_fixup(struct page *head,
354{ 360{
355} 361}
356 362
363static inline
364void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
365{
366}
357#endif /* CONFIG_CGROUP_MEM_CONT */ 367#endif /* CONFIG_CGROUP_MEM_CONT */
358 368
359#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM) 369#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
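mem_cgroup_soft_limit_reclaim() grows a total_scanned out-parameter and mem_cgroup_zone_nr_pages() becomes mem_cgroup_zone_nr_lru_pages(). A sketch of a caller threading the new counter back into its own scan accounting (the wrapper and its names are illustrative):

    #include <linux/memcontrol.h>
    #include <linux/mmzone.h>
    #include <linux/gfp.h>

    static unsigned long demo_soft_limit_pass(struct zone *zone, int order,
                                              unsigned long *scanned_total)
    {
            unsigned long nr_scanned = 0;
            unsigned long reclaimed;

            reclaimed = mem_cgroup_soft_limit_reclaim(zone, order, GFP_KERNEL,
                                                      &nr_scanned);
            *scanned_total += nr_scanned;  /* feed scan work back to the caller */
            return reclaimed;
    }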
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 69d1010e2e51..5ff2400ad46c 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -311,10 +311,6 @@ enum max8997_irq {
311 MAX8997_IRQ_NR, 311 MAX8997_IRQ_NR,
312}; 312};
313 313
314#define MAX8997_REG_BUCK1DVS(x) (MAX8997_REG_BUCK1DVS1 + (x) - 1)
315#define MAX8997_REG_BUCK2DVS(x) (MAX8997_REG_BUCK2DVS1 + (x) - 1)
316#define MAX8997_REG_BUCK5DVS(x) (MAX8997_REG_BUCK5DVS1 + (x) - 1)
317
318#define MAX8997_NUM_GPIO 12 314#define MAX8997_NUM_GPIO 12
319struct max8997_dev { 315struct max8997_dev {
320 struct device *dev; 316 struct device *dev;
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
new file mode 100644
index 000000000000..8bb85b930c07
--- /dev/null
+++ b/include/linux/mfd/tps65910.h
@@ -0,0 +1,800 @@
1/*
2 * tps65910.h -- TI TPS6591x
3 *
4 * Copyright 2010-2011 Texas Instruments Inc.
5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
8 * Author: Arnaud Deconinck <a-deconinck@ti.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16
17#ifndef __LINUX_MFD_TPS65910_H
18#define __LINUX_MFD_TPS65910_H
19
20/* TPS chip id list */
21#define TPS65910 0
22#define TPS65911 1
23
24/* TPS regulator type list */
25#define REGULATOR_LDO 0
26#define REGULATOR_DCDC 1
27
28/*
29 * List of registers for component TPS65910
30 *
31 */
32
33#define TPS65910_SECONDS 0x0
34#define TPS65910_MINUTES 0x1
35#define TPS65910_HOURS 0x2
36#define TPS65910_DAYS 0x3
37#define TPS65910_MONTHS 0x4
38#define TPS65910_YEARS 0x5
39#define TPS65910_WEEKS 0x6
40#define TPS65910_ALARM_SECONDS 0x8
41#define TPS65910_ALARM_MINUTES 0x9
42#define TPS65910_ALARM_HOURS 0xA
43#define TPS65910_ALARM_DAYS 0xB
44#define TPS65910_ALARM_MONTHS 0xC
45#define TPS65910_ALARM_YEARS 0xD
46#define TPS65910_RTC_CTRL 0x10
47#define TPS65910_RTC_STATUS 0x11
48#define TPS65910_RTC_INTERRUPTS 0x12
49#define TPS65910_RTC_COMP_LSB 0x13
50#define TPS65910_RTC_COMP_MSB 0x14
51#define TPS65910_RTC_RES_PROG 0x15
52#define TPS65910_RTC_RESET_STATUS 0x16
53#define TPS65910_BCK1 0x17
54#define TPS65910_BCK2 0x18
55#define TPS65910_BCK3 0x19
56#define TPS65910_BCK4 0x1A
57#define TPS65910_BCK5 0x1B
58#define TPS65910_PUADEN 0x1C
59#define TPS65910_REF 0x1D
60#define TPS65910_VRTC 0x1E
61#define TPS65910_VIO 0x20
62#define TPS65910_VDD1 0x21
63#define TPS65910_VDD1_OP 0x22
64#define TPS65910_VDD1_SR 0x23
65#define TPS65910_VDD2 0x24
66#define TPS65910_VDD2_OP 0x25
67#define TPS65910_VDD2_SR 0x26
68#define TPS65910_VDD3 0x27
69#define TPS65910_VDIG1 0x30
70#define TPS65910_VDIG2 0x31
71#define TPS65910_VAUX1 0x32
72#define TPS65910_VAUX2 0x33
73#define TPS65910_VAUX33 0x34
74#define TPS65910_VMMC 0x35
75#define TPS65910_VPLL 0x36
76#define TPS65910_VDAC 0x37
77#define TPS65910_THERM 0x38
78#define TPS65910_BBCH 0x39
79#define TPS65910_DCDCCTRL 0x3E
80#define TPS65910_DEVCTRL 0x3F
81#define TPS65910_DEVCTRL2 0x40
82#define TPS65910_SLEEP_KEEP_LDO_ON 0x41
83#define TPS65910_SLEEP_KEEP_RES_ON 0x42
84#define TPS65910_SLEEP_SET_LDO_OFF 0x43
85#define TPS65910_SLEEP_SET_RES_OFF 0x44
86#define TPS65910_EN1_LDO_ASS 0x45
87#define TPS65910_EN1_SMPS_ASS 0x46
88#define TPS65910_EN2_LDO_ASS 0x47
89#define TPS65910_EN2_SMPS_ASS 0x48
90#define TPS65910_EN3_LDO_ASS 0x49
91#define TPS65910_SPARE 0x4A
92#define TPS65910_INT_STS 0x50
93#define TPS65910_INT_MSK 0x51
94#define TPS65910_INT_STS2 0x52
95#define TPS65910_INT_MSK2 0x53
96#define TPS65910_INT_STS3 0x54
97#define TPS65910_INT_MSK3 0x55
98#define TPS65910_GPIO0 0x60
99#define TPS65910_GPIO1 0x61
100#define TPS65910_GPIO2 0x62
101#define TPS65910_GPIO3 0x63
102#define TPS65910_GPIO4 0x64
103#define TPS65910_GPIO5 0x65
104#define TPS65910_GPIO6 0x66
105#define TPS65910_GPIO7 0x67
106#define TPS65910_GPIO8 0x68
107#define TPS65910_JTAGVERNUM 0x80
108#define TPS65910_MAX_REGISTER 0x80
109
110/*
111 * List of registers specific to TPS65911
112 */
113#define TPS65911_VDDCTRL 0x27
114#define TPS65911_VDDCTRL_OP 0x28
115#define TPS65911_VDDCTRL_SR 0x29
116#define TPS65911_LDO1 0x30
117#define TPS65911_LDO2 0x31
118#define TPS65911_LDO5 0x32
119#define TPS65911_LDO8 0x33
120#define TPS65911_LDO7 0x34
121#define TPS65911_LDO6 0x35
122#define TPS65911_LDO4 0x36
123#define TPS65911_LDO3 0x37
124#define TPS65911_VMBCH 0x6A
125#define TPS65911_VMBCH2 0x6B
126
127/*
128 * List of register bitfields for component TPS65910
129 *
130 */
131
132
133/*Register BCK1 (0x80) register.RegisterDescription */
134#define BCK1_BCKUP_MASK 0xFF
135#define BCK1_BCKUP_SHIFT 0
136
137
138/*Register BCK2 (0x80) register.RegisterDescription */
139#define BCK2_BCKUP_MASK 0xFF
140#define BCK2_BCKUP_SHIFT 0
141
142
143/*Register BCK3 (0x80) register.RegisterDescription */
144#define BCK3_BCKUP_MASK 0xFF
145#define BCK3_BCKUP_SHIFT 0
146
147
148/*Register BCK4 (0x80) register.RegisterDescription */
149#define BCK4_BCKUP_MASK 0xFF
150#define BCK4_BCKUP_SHIFT 0
151
152
153/*Register BCK5 (0x80) register.RegisterDescription */
154#define BCK5_BCKUP_MASK 0xFF
155#define BCK5_BCKUP_SHIFT 0
156
157
158/*Register PUADEN (0x80) register.RegisterDescription */
159#define PUADEN_EN3P_MASK 0x80
160#define PUADEN_EN3P_SHIFT 7
161#define PUADEN_I2CCTLP_MASK 0x40
162#define PUADEN_I2CCTLP_SHIFT 6
163#define PUADEN_I2CSRP_MASK 0x20
164#define PUADEN_I2CSRP_SHIFT 5
165#define PUADEN_PWRONP_MASK 0x10
166#define PUADEN_PWRONP_SHIFT 4
167#define PUADEN_SLEEPP_MASK 0x08
168#define PUADEN_SLEEPP_SHIFT 3
169#define PUADEN_PWRHOLDP_MASK 0x04
170#define PUADEN_PWRHOLDP_SHIFT 2
171#define PUADEN_BOOT1P_MASK 0x02
172#define PUADEN_BOOT1P_SHIFT 1
173#define PUADEN_BOOT0P_MASK 0x01
174#define PUADEN_BOOT0P_SHIFT 0
175
176
177/*Register REF (0x80) register.RegisterDescription */
178#define REF_VMBCH_SEL_MASK 0x0C
179#define REF_VMBCH_SEL_SHIFT 2
180#define REF_ST_MASK 0x03
181#define REF_ST_SHIFT 0
182
183
184/*Register VRTC (0x80) register.RegisterDescription */
185#define VRTC_VRTC_OFFMASK_MASK 0x08
186#define VRTC_VRTC_OFFMASK_SHIFT 3
187#define VRTC_ST_MASK 0x03
188#define VRTC_ST_SHIFT 0
189
190
191/*Register VIO (0x80) register.RegisterDescription */
192#define VIO_ILMAX_MASK 0xC0
193#define VIO_ILMAX_SHIFT 6
194#define VIO_SEL_MASK 0x0C
195#define VIO_SEL_SHIFT 2
196#define VIO_ST_MASK 0x03
197#define VIO_ST_SHIFT 0
198
199
200/*Register VDD1 (0x80) register.RegisterDescription */
201#define VDD1_VGAIN_SEL_MASK 0xC0
202#define VDD1_VGAIN_SEL_SHIFT 6
203#define VDD1_ILMAX_MASK 0x20
204#define VDD1_ILMAX_SHIFT 5
205#define VDD1_TSTEP_MASK 0x1C
206#define VDD1_TSTEP_SHIFT 2
207#define VDD1_ST_MASK 0x03
208#define VDD1_ST_SHIFT 0
209
210
211/*Register VDD1_OP (0x80) register.RegisterDescription */
212#define VDD1_OP_CMD_MASK 0x80
213#define VDD1_OP_CMD_SHIFT 7
214#define VDD1_OP_SEL_MASK 0x7F
215#define VDD1_OP_SEL_SHIFT 0
216
217
218/*Register VDD1_SR (0x80) register.RegisterDescription */
219#define VDD1_SR_SEL_MASK 0x7F
220#define VDD1_SR_SEL_SHIFT 0
221
222
223/*Register VDD2 (0x80) register.RegisterDescription */
224#define VDD2_VGAIN_SEL_MASK 0xC0
225#define VDD2_VGAIN_SEL_SHIFT 6
226#define VDD2_ILMAX_MASK 0x20
227#define VDD2_ILMAX_SHIFT 5
228#define VDD2_TSTEP_MASK 0x1C
229#define VDD2_TSTEP_SHIFT 2
230#define VDD2_ST_MASK 0x03
231#define VDD2_ST_SHIFT 0
232
233
234/*Register VDD2_OP (0x80) register.RegisterDescription */
235#define VDD2_OP_CMD_MASK 0x80
236#define VDD2_OP_CMD_SHIFT 7
237#define VDD2_OP_SEL_MASK 0x7F
238#define VDD2_OP_SEL_SHIFT 0
239
240/*Register VDD2_SR (0x80) register.RegisterDescription */
241#define VDD2_SR_SEL_MASK 0x7F
242#define VDD2_SR_SEL_SHIFT 0
243
244
245/*Registers VDD1, VDD2 voltage values definitions */
246#define VDD1_2_NUM_VOLTS 73
247#define VDD1_2_MIN_VOLT 6000
248#define VDD1_2_OFFSET 125
249
250
251/*Register VDD3 (0x80) register.RegisterDescription */
252#define VDD3_CKINEN_MASK 0x04
253#define VDD3_CKINEN_SHIFT 2
254#define VDD3_ST_MASK 0x03
255#define VDD3_ST_SHIFT 0
256#define VDDCTRL_MIN_VOLT 6000
257#define VDDCTRL_OFFSET 125
258
259/*Registers VDIG (0x80) to VDAC register.RegisterDescription */
260#define LDO_SEL_MASK 0x0C
261#define LDO_SEL_SHIFT 2
262#define LDO_ST_MASK 0x03
263#define LDO_ST_SHIFT 0
264#define LDO_ST_ON_BIT 0x01
265#define LDO_ST_MODE_BIT 0x02
266
267
268/* Registers LDO1 to LDO8 in tps65910 */
269#define LDO1_SEL_MASK 0xFC
270#define LDO3_SEL_MASK 0x7C
271#define LDO_MIN_VOLT 1000
272#define LDO_MAX_VOLT 3300
273
274
275/*Register VDIG1 (0x80) register.RegisterDescription */
276#define VDIG1_SEL_MASK 0x0C
277#define VDIG1_SEL_SHIFT 2
278#define VDIG1_ST_MASK 0x03
279#define VDIG1_ST_SHIFT 0
280
281
282/*Register VDIG2 (0x80) register.RegisterDescription */
283#define VDIG2_SEL_MASK 0x0C
284#define VDIG2_SEL_SHIFT 2
285#define VDIG2_ST_MASK 0x03
286#define VDIG2_ST_SHIFT 0
287
288
289/*Register VAUX1 (0x80) register.RegisterDescription */
290#define VAUX1_SEL_MASK 0x0C
291#define VAUX1_SEL_SHIFT 2
292#define VAUX1_ST_MASK 0x03
293#define VAUX1_ST_SHIFT 0
294
295
296/*Register VAUX2 (0x80) register.RegisterDescription */
297#define VAUX2_SEL_MASK 0x0C
298#define VAUX2_SEL_SHIFT 2
299#define VAUX2_ST_MASK 0x03
300#define VAUX2_ST_SHIFT 0
301
302
303/*Register VAUX33 (0x80) register.RegisterDescription */
304#define VAUX33_SEL_MASK 0x0C
305#define VAUX33_SEL_SHIFT 2
306#define VAUX33_ST_MASK 0x03
307#define VAUX33_ST_SHIFT 0
308
309
310/*Register VMMC (0x80) register.RegisterDescription */
311#define VMMC_SEL_MASK 0x0C
312#define VMMC_SEL_SHIFT 2
313#define VMMC_ST_MASK 0x03
314#define VMMC_ST_SHIFT 0
315
316
317/*Register VPLL (0x80) register.RegisterDescription */
318#define VPLL_SEL_MASK 0x0C
319#define VPLL_SEL_SHIFT 2
320#define VPLL_ST_MASK 0x03
321#define VPLL_ST_SHIFT 0
322
323
324/*Register VDAC (0x80) register.RegisterDescription */
325#define VDAC_SEL_MASK 0x0C
326#define VDAC_SEL_SHIFT 2
327#define VDAC_ST_MASK 0x03
328#define VDAC_ST_SHIFT 0
329
330
331/*Register THERM (0x80) register.RegisterDescription */
332#define THERM_THERM_HD_MASK 0x20
333#define THERM_THERM_HD_SHIFT 5
334#define THERM_THERM_TS_MASK 0x10
335#define THERM_THERM_TS_SHIFT 4
336#define THERM_THERM_HDSEL_MASK 0x0C
337#define THERM_THERM_HDSEL_SHIFT 2
338#define THERM_RSVD1_MASK 0x02
339#define THERM_RSVD1_SHIFT 1
340#define THERM_THERM_STATE_MASK 0x01
341#define THERM_THERM_STATE_SHIFT 0
342
343
344/*Register BBCH (0x80) register.RegisterDescription */
345#define BBCH_BBSEL_MASK 0x06
346#define BBCH_BBSEL_SHIFT 1
347#define BBCH_BBCHEN_MASK 0x01
348#define BBCH_BBCHEN_SHIFT 0
349
350
351/*Register DCDCCTRL (0x80) register.RegisterDescription */
352#define DCDCCTRL_VDD2_PSKIP_MASK 0x20
353#define DCDCCTRL_VDD2_PSKIP_SHIFT 5
354#define DCDCCTRL_VDD1_PSKIP_MASK 0x10
355#define DCDCCTRL_VDD1_PSKIP_SHIFT 4
356#define DCDCCTRL_VIO_PSKIP_MASK 0x08
357#define DCDCCTRL_VIO_PSKIP_SHIFT 3
358#define DCDCCTRL_DCDCCKEXT_MASK 0x04
359#define DCDCCTRL_DCDCCKEXT_SHIFT 2
360#define DCDCCTRL_DCDCCKSYNC_MASK 0x03
361#define DCDCCTRL_DCDCCKSYNC_SHIFT 0
362
363
364/*Register DEVCTRL (0x80) register.RegisterDescription */
365#define DEVCTRL_RTC_PWDN_MASK 0x40
366#define DEVCTRL_RTC_PWDN_SHIFT 6
367#define DEVCTRL_CK32K_CTRL_MASK 0x20
368#define DEVCTRL_CK32K_CTRL_SHIFT 5
369#define DEVCTRL_SR_CTL_I2C_SEL_MASK 0x10
370#define DEVCTRL_SR_CTL_I2C_SEL_SHIFT 4
371#define DEVCTRL_DEV_OFF_RST_MASK 0x08
372#define DEVCTRL_DEV_OFF_RST_SHIFT 3
373#define DEVCTRL_DEV_ON_MASK 0x04
374#define DEVCTRL_DEV_ON_SHIFT 2
375#define DEVCTRL_DEV_SLP_MASK 0x02
376#define DEVCTRL_DEV_SLP_SHIFT 1
377#define DEVCTRL_DEV_OFF_MASK 0x01
378#define DEVCTRL_DEV_OFF_SHIFT 0
379
380
381/*Register DEVCTRL2 (0x80) register.RegisterDescription */
382#define DEVCTRL2_TSLOT_LENGTH_MASK 0x30
383#define DEVCTRL2_TSLOT_LENGTH_SHIFT 4
384#define DEVCTRL2_SLEEPSIG_POL_MASK 0x08
385#define DEVCTRL2_SLEEPSIG_POL_SHIFT 3
386#define DEVCTRL2_PWON_LP_OFF_MASK 0x04
387#define DEVCTRL2_PWON_LP_OFF_SHIFT 2
388#define DEVCTRL2_PWON_LP_RST_MASK 0x02
389#define DEVCTRL2_PWON_LP_RST_SHIFT 1
390#define DEVCTRL2_IT_POL_MASK 0x01
391#define DEVCTRL2_IT_POL_SHIFT 0
392
393
394/*Register SLEEP_KEEP_LDO_ON (0x80) register.RegisterDescription */
395#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_MASK 0x80
396#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_SHIFT 7
397#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_MASK 0x40
398#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_SHIFT 6
399#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_MASK 0x20
400#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_SHIFT 5
401#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_MASK 0x10
402#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_SHIFT 4
403#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_MASK 0x08
404#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_SHIFT 3
405#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_MASK 0x04
406#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_SHIFT 2
407#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_MASK 0x02
408#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_SHIFT 1
409#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_MASK 0x01
410#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_SHIFT 0
411
412
413/*Register SLEEP_KEEP_RES_ON (0x80) register.RegisterDescription */
414#define SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK 0x80
415#define SLEEP_KEEP_RES_ON_THERM_KEEPON_SHIFT 7
416#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK 0x40
417#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_SHIFT 6
418#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_MASK 0x20
419#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_SHIFT 5
420#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK 0x10
421#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_SHIFT 4
422#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_MASK 0x08
423#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_SHIFT 3
424#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_MASK 0x04
425#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_SHIFT 2
426#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_MASK 0x02
427#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_SHIFT 1
428#define SLEEP_KEEP_RES_ON_VIO_KEEPON_MASK 0x01
429#define SLEEP_KEEP_RES_ON_VIO_KEEPON_SHIFT 0
430
431
432/*Register SLEEP_SET_LDO_OFF (0x80) register.RegisterDescription */
433#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_MASK 0x80
434#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_SHIFT 7
435#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_MASK 0x40
436#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_SHIFT 6
437#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_MASK 0x20
438#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_SHIFT 5
439#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_MASK 0x10
440#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_SHIFT 4
441#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_MASK 0x08
442#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_SHIFT 3
443#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_MASK 0x04
444#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_SHIFT 2
445#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_MASK 0x02
446#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_SHIFT 1
447#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_MASK 0x01
448#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_SHIFT 0
449
450
451/*Register SLEEP_SET_RES_OFF (0x80) register.RegisterDescription */
452#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_MASK 0x80
453#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_SHIFT 7
454#define SLEEP_SET_RES_OFF_RSVD_MASK 0x60
455#define SLEEP_SET_RES_OFF_RSVD_SHIFT 5
456#define SLEEP_SET_RES_OFF_SPARE_SETOFF_MASK 0x10
457#define SLEEP_SET_RES_OFF_SPARE_SETOFF_SHIFT 4
458#define SLEEP_SET_RES_OFF_VDD3_SETOFF_MASK 0x08
459#define SLEEP_SET_RES_OFF_VDD3_SETOFF_SHIFT 3
460#define SLEEP_SET_RES_OFF_VDD2_SETOFF_MASK 0x04
461#define SLEEP_SET_RES_OFF_VDD2_SETOFF_SHIFT 2
462#define SLEEP_SET_RES_OFF_VDD1_SETOFF_MASK 0x02
463#define SLEEP_SET_RES_OFF_VDD1_SETOFF_SHIFT 1
464#define SLEEP_SET_RES_OFF_VIO_SETOFF_MASK 0x01
465#define SLEEP_SET_RES_OFF_VIO_SETOFF_SHIFT 0
466
467
468/*Register EN1_LDO_ASS (0x80) register.RegisterDescription */
469#define EN1_LDO_ASS_VDAC_EN1_MASK 0x80
470#define EN1_LDO_ASS_VDAC_EN1_SHIFT 7
471#define EN1_LDO_ASS_VPLL_EN1_MASK 0x40
472#define EN1_LDO_ASS_VPLL_EN1_SHIFT 6
473#define EN1_LDO_ASS_VAUX33_EN1_MASK 0x20
474#define EN1_LDO_ASS_VAUX33_EN1_SHIFT 5
475#define EN1_LDO_ASS_VAUX2_EN1_MASK 0x10
476#define EN1_LDO_ASS_VAUX2_EN1_SHIFT 4
477#define EN1_LDO_ASS_VAUX1_EN1_MASK 0x08
478#define EN1_LDO_ASS_VAUX1_EN1_SHIFT 3
479#define EN1_LDO_ASS_VDIG2_EN1_MASK 0x04
480#define EN1_LDO_ASS_VDIG2_EN1_SHIFT 2
481#define EN1_LDO_ASS_VDIG1_EN1_MASK 0x02
482#define EN1_LDO_ASS_VDIG1_EN1_SHIFT 1
483#define EN1_LDO_ASS_VMMC_EN1_MASK 0x01
484#define EN1_LDO_ASS_VMMC_EN1_SHIFT 0
485
486
487/*Register EN1_SMPS_ASS (0x80) register.RegisterDescription */
488#define EN1_SMPS_ASS_RSVD_MASK 0xE0
489#define EN1_SMPS_ASS_RSVD_SHIFT 5
490#define EN1_SMPS_ASS_SPARE_EN1_MASK 0x10
491#define EN1_SMPS_ASS_SPARE_EN1_SHIFT 4
492#define EN1_SMPS_ASS_VDD3_EN1_MASK 0x08
493#define EN1_SMPS_ASS_VDD3_EN1_SHIFT 3
494#define EN1_SMPS_ASS_VDD2_EN1_MASK 0x04
495#define EN1_SMPS_ASS_VDD2_EN1_SHIFT 2
496#define EN1_SMPS_ASS_VDD1_EN1_MASK 0x02
497#define EN1_SMPS_ASS_VDD1_EN1_SHIFT 1
498#define EN1_SMPS_ASS_VIO_EN1_MASK 0x01
499#define EN1_SMPS_ASS_VIO_EN1_SHIFT 0
500
501
502/*Register EN2_LDO_ASS (0x80) register.RegisterDescription */
503#define EN2_LDO_ASS_VDAC_EN2_MASK 0x80
504#define EN2_LDO_ASS_VDAC_EN2_SHIFT 7
505#define EN2_LDO_ASS_VPLL_EN2_MASK 0x40
506#define EN2_LDO_ASS_VPLL_EN2_SHIFT 6
507#define EN2_LDO_ASS_VAUX33_EN2_MASK 0x20
508#define EN2_LDO_ASS_VAUX33_EN2_SHIFT 5
509#define EN2_LDO_ASS_VAUX2_EN2_MASK 0x10
510#define EN2_LDO_ASS_VAUX2_EN2_SHIFT 4
511#define EN2_LDO_ASS_VAUX1_EN2_MASK 0x08
512#define EN2_LDO_ASS_VAUX1_EN2_SHIFT 3
513#define EN2_LDO_ASS_VDIG2_EN2_MASK 0x04
514#define EN2_LDO_ASS_VDIG2_EN2_SHIFT 2
515#define EN2_LDO_ASS_VDIG1_EN2_MASK 0x02
516#define EN2_LDO_ASS_VDIG1_EN2_SHIFT 1
517#define EN2_LDO_ASS_VMMC_EN2_MASK 0x01
518#define EN2_LDO_ASS_VMMC_EN2_SHIFT 0
519
520
521/*Register EN2_SMPS_ASS (0x80) register.RegisterDescription */
522#define EN2_SMPS_ASS_RSVD_MASK 0xE0
523#define EN2_SMPS_ASS_RSVD_SHIFT 5
524#define EN2_SMPS_ASS_SPARE_EN2_MASK 0x10
525#define EN2_SMPS_ASS_SPARE_EN2_SHIFT 4
526#define EN2_SMPS_ASS_VDD3_EN2_MASK 0x08
527#define EN2_SMPS_ASS_VDD3_EN2_SHIFT 3
528#define EN2_SMPS_ASS_VDD2_EN2_MASK 0x04
529#define EN2_SMPS_ASS_VDD2_EN2_SHIFT 2
530#define EN2_SMPS_ASS_VDD1_EN2_MASK 0x02
531#define EN2_SMPS_ASS_VDD1_EN2_SHIFT 1
532#define EN2_SMPS_ASS_VIO_EN2_MASK 0x01
533#define EN2_SMPS_ASS_VIO_EN2_SHIFT 0
534
535
536/*Register EN3_LDO_ASS (0x80) register.RegisterDescription */
537#define EN3_LDO_ASS_VDAC_EN3_MASK 0x80
538#define EN3_LDO_ASS_VDAC_EN3_SHIFT 7
539#define EN3_LDO_ASS_VPLL_EN3_MASK 0x40
540#define EN3_LDO_ASS_VPLL_EN3_SHIFT 6
541#define EN3_LDO_ASS_VAUX33_EN3_MASK 0x20
542#define EN3_LDO_ASS_VAUX33_EN3_SHIFT 5
543#define EN3_LDO_ASS_VAUX2_EN3_MASK 0x10
544#define EN3_LDO_ASS_VAUX2_EN3_SHIFT 4
545#define EN3_LDO_ASS_VAUX1_EN3_MASK 0x08
546#define EN3_LDO_ASS_VAUX1_EN3_SHIFT 3
547#define EN3_LDO_ASS_VDIG2_EN3_MASK 0x04
548#define EN3_LDO_ASS_VDIG2_EN3_SHIFT 2
549#define EN3_LDO_ASS_VDIG1_EN3_MASK 0x02
550#define EN3_LDO_ASS_VDIG1_EN3_SHIFT 1
551#define EN3_LDO_ASS_VMMC_EN3_MASK 0x01
552#define EN3_LDO_ASS_VMMC_EN3_SHIFT 0
553
554
555/*Register SPARE (0x80) register.RegisterDescription */
556#define SPARE_SPARE_MASK 0xFF
557#define SPARE_SPARE_SHIFT 0
558
559
560/*Register INT_STS (0x80) register.RegisterDescription */
561#define INT_STS_RTC_PERIOD_IT_MASK 0x80
562#define INT_STS_RTC_PERIOD_IT_SHIFT 7
563#define INT_STS_RTC_ALARM_IT_MASK 0x40
564#define INT_STS_RTC_ALARM_IT_SHIFT 6
565#define INT_STS_HOTDIE_IT_MASK 0x20
566#define INT_STS_HOTDIE_IT_SHIFT 5
567#define INT_STS_PWRHOLD_IT_MASK 0x10
568#define INT_STS_PWRHOLD_IT_SHIFT 4
569#define INT_STS_PWRON_LP_IT_MASK 0x08
570#define INT_STS_PWRON_LP_IT_SHIFT 3
571#define INT_STS_PWRON_IT_MASK 0x04
572#define INT_STS_PWRON_IT_SHIFT 2
573#define INT_STS_VMBHI_IT_MASK 0x02
574#define INT_STS_VMBHI_IT_SHIFT 1
575#define INT_STS_VMBDCH_IT_MASK 0x01
576#define INT_STS_VMBDCH_IT_SHIFT 0
577
578
579/*Register INT_MSK (0x80) register.RegisterDescription */
580#define INT_MSK_RTC_PERIOD_IT_MSK_MASK 0x80
581#define INT_MSK_RTC_PERIOD_IT_MSK_SHIFT 7
582#define INT_MSK_RTC_ALARM_IT_MSK_MASK 0x40
583#define INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6
584#define INT_MSK_HOTDIE_IT_MSK_MASK 0x20
585#define INT_MSK_HOTDIE_IT_MSK_SHIFT 5
586#define INT_MSK_PWRHOLD_IT_MSK_MASK 0x10
587#define INT_MSK_PWRHOLD_IT_MSK_SHIFT 4
588#define INT_MSK_PWRON_LP_IT_MSK_MASK 0x08
589#define INT_MSK_PWRON_LP_IT_MSK_SHIFT 3
590#define INT_MSK_PWRON_IT_MSK_MASK 0x04
591#define INT_MSK_PWRON_IT_MSK_SHIFT 2
592#define INT_MSK_VMBHI_IT_MSK_MASK 0x02
593#define INT_MSK_VMBHI_IT_MSK_SHIFT 1
594#define INT_MSK_VMBDCH_IT_MSK_MASK 0x01
595#define INT_MSK_VMBDCH_IT_MSK_SHIFT 0
596
597
598/*Register INT_STS2 (0x80) register.RegisterDescription */
599#define INT_STS2_GPIO3_F_IT_MASK 0x80
600#define INT_STS2_GPIO3_F_IT_SHIFT 7
601#define INT_STS2_GPIO3_R_IT_MASK 0x40
602#define INT_STS2_GPIO3_R_IT_SHIFT 6
603#define INT_STS2_GPIO2_F_IT_MASK 0x20
604#define INT_STS2_GPIO2_F_IT_SHIFT 5
605#define INT_STS2_GPIO2_R_IT_MASK 0x10
606#define INT_STS2_GPIO2_R_IT_SHIFT 4
607#define INT_STS2_GPIO1_F_IT_MASK 0x08
608#define INT_STS2_GPIO1_F_IT_SHIFT 3
609#define INT_STS2_GPIO1_R_IT_MASK 0x04
610#define INT_STS2_GPIO1_R_IT_SHIFT 2
611#define INT_STS2_GPIO0_F_IT_MASK 0x02
612#define INT_STS2_GPIO0_F_IT_SHIFT 1
613#define INT_STS2_GPIO0_R_IT_MASK 0x01
614#define INT_STS2_GPIO0_R_IT_SHIFT 0
615
616
617/*Register INT_MSK2 (0x80) register.RegisterDescription */
618#define INT_MSK2_GPIO3_F_IT_MSK_MASK 0x80
619#define INT_MSK2_GPIO3_F_IT_MSK_SHIFT 7
620#define INT_MSK2_GPIO3_R_IT_MSK_MASK 0x40
621#define INT_MSK2_GPIO3_R_IT_MSK_SHIFT 6
622#define INT_MSK2_GPIO2_F_IT_MSK_MASK 0x20
623#define INT_MSK2_GPIO2_F_IT_MSK_SHIFT 5
624#define INT_MSK2_GPIO2_R_IT_MSK_MASK 0x10
625#define INT_MSK2_GPIO2_R_IT_MSK_SHIFT 4
626#define INT_MSK2_GPIO1_F_IT_MSK_MASK 0x08
627#define INT_MSK2_GPIO1_F_IT_MSK_SHIFT 3
628#define INT_MSK2_GPIO1_R_IT_MSK_MASK 0x04
629#define INT_MSK2_GPIO1_R_IT_MSK_SHIFT 2
630#define INT_MSK2_GPIO0_F_IT_MSK_MASK 0x02
631#define INT_MSK2_GPIO0_F_IT_MSK_SHIFT 1
632#define INT_MSK2_GPIO0_R_IT_MSK_MASK 0x01
633#define INT_MSK2_GPIO0_R_IT_MSK_SHIFT 0
634
635
636/*Register INT_STS3 (0x80) register.RegisterDescription */
637#define INT_STS3_GPIO5_F_IT_MASK 0x08
638#define INT_STS3_GPIO5_F_IT_SHIFT 3
639#define INT_STS3_GPIO5_R_IT_MASK 0x04
640#define INT_STS3_GPIO5_R_IT_SHIFT 2
641#define INT_STS3_GPIO4_F_IT_MASK 0x02
642#define INT_STS3_GPIO4_F_IT_SHIFT 1
643#define INT_STS3_GPIO4_R_IT_MASK 0x01
644#define INT_STS3_GPIO4_R_IT_SHIFT 0
645
646
647/*Register INT_MSK3 (0x80) register.RegisterDescription */
648#define INT_MSK3_GPIO5_F_IT_MSK_MASK 0x08
649#define INT_MSK3_GPIO5_F_IT_MSK_SHIFT 3
650#define INT_MSK3_GPIO5_R_IT_MSK_MASK 0x04
651#define INT_MSK3_GPIO5_R_IT_MSK_SHIFT 2
652#define INT_MSK3_GPIO4_F_IT_MSK_MASK 0x02
653#define INT_MSK3_GPIO4_F_IT_MSK_SHIFT 1
654#define INT_MSK3_GPIO4_R_IT_MSK_MASK 0x01
655#define INT_MSK3_GPIO4_R_IT_MSK_SHIFT 0
656
657
658/*Register GPIO (0x80) register.RegisterDescription */
659#define GPIO_DEB_MASK 0x10
660#define GPIO_DEB_SHIFT 4
661#define GPIO_PUEN_MASK 0x08
662#define GPIO_PUEN_SHIFT 3
663#define GPIO_CFG_MASK 0x04
664#define GPIO_CFG_SHIFT 2
665#define GPIO_STS_MASK 0x02
666#define GPIO_STS_SHIFT 1
667#define GPIO_SET_MASK 0x01
668#define GPIO_SET_SHIFT 0
669
670
671/*Register JTAGVERNUM (0x80) register.RegisterDescription */
672#define JTAGVERNUM_VERNUM_MASK 0x0F
673#define JTAGVERNUM_VERNUM_SHIFT 0
674
675
676/* Register VDDCTRL (0x27) bit definitions */
677#define VDDCTRL_ST_MASK 0x03
678#define VDDCTRL_ST_SHIFT 0
679
680
681/*Register VDDCTRL_OP (0x28) bit definitions */
682#define VDDCTRL_OP_CMD_MASK 0x80
683#define VDDCTRL_OP_CMD_SHIFT 7
684#define VDDCTRL_OP_SEL_MASK 0x7F
685#define VDDCTRL_OP_SEL_SHIFT 0
686
687
688/*Register VDDCTRL_SR (0x29) bit definitions */
689#define VDDCTRL_SR_SEL_MASK 0x7F
690#define VDDCTRL_SR_SEL_SHIFT 0
691
692
693/* IRQ Definitions */
694#define TPS65910_IRQ_VBAT_VMBDCH 0
695#define TPS65910_IRQ_VBAT_VMHI 1
696#define TPS65910_IRQ_PWRON 2
697#define TPS65910_IRQ_PWRON_LP 3
698#define TPS65910_IRQ_PWRHOLD 4
699#define TPS65910_IRQ_HOTDIE 5
700#define TPS65910_IRQ_RTC_ALARM 6
701#define TPS65910_IRQ_RTC_PERIOD 7
702#define TPS65910_IRQ_GPIO_R 8
703#define TPS65910_IRQ_GPIO_F 9
704#define TPS65910_NUM_IRQ 10
705
706#define TPS65911_IRQ_VBAT_VMBDCH 0
707#define TPS65911_IRQ_VBAT_VMBDCH2L 1
708#define TPS65911_IRQ_VBAT_VMBDCH2H 2
709#define TPS65911_IRQ_VBAT_VMHI 3
710#define TPS65911_IRQ_PWRON 4
711#define TPS65911_IRQ_PWRON_LP 5
712#define TPS65911_IRQ_PWRHOLD_F 6
713#define TPS65911_IRQ_PWRHOLD_R 7
714#define TPS65911_IRQ_HOTDIE 8
715#define TPS65911_IRQ_RTC_ALARM 9
716#define TPS65911_IRQ_RTC_PERIOD 10
717#define TPS65911_IRQ_GPIO0_R 11
718#define TPS65911_IRQ_GPIO0_F 12
719#define TPS65911_IRQ_GPIO1_R 13
720#define TPS65911_IRQ_GPIO1_F 14
721#define TPS65911_IRQ_GPIO2_R 15
722#define TPS65911_IRQ_GPIO2_F 16
723#define TPS65911_IRQ_GPIO3_R 17
724#define TPS65911_IRQ_GPIO3_F 18
725#define TPS65911_IRQ_GPIO4_R 19
726#define TPS65911_IRQ_GPIO4_F 20
727#define TPS65911_IRQ_GPIO5_R 21
728#define TPS65911_IRQ_GPIO5_F 22
729#define TPS65911_IRQ_WTCHDG 23
730#define TPS65911_IRQ_PWRDN 24
731
732#define TPS65911_NUM_IRQ 25
733
734
735/* GPIO Register Definitions */
736#define TPS65910_GPIO_DEB BIT(2)
737#define TPS65910_GPIO_PUEN BIT(3)
738#define TPS65910_GPIO_CFG BIT(2)
739#define TPS65910_GPIO_STS BIT(1)
740#define TPS65910_GPIO_SET BIT(0)
741
742/**
743 * struct tps65910_board
744 * Board platform data may be used to initialize regulators.
745 */
746
747struct tps65910_board {
748 int gpio_base;
749 int irq;
750 int irq_base;
751 int vmbch_threshold;
752 int vmbch2_threshold;
753 struct regulator_init_data *tps65910_pmic_init_data;
754};
755
756/**
757 * struct tps65910 - tps65910 sub-driver chip access routines
758 */
759
760struct tps65910 {
761 struct device *dev;
762 struct i2c_client *i2c_client;
763 struct mutex io_mutex;
764 unsigned int id;
765 int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest);
766 int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src);
767
768 /* Client devices */
769 struct tps65910_pmic *pmic;
770 struct tps65910_rtc *rtc;
771 struct tps65910_power *power;
772
773 /* GPIO Handling */
774 struct gpio_chip gpio;
775
776 /* IRQ Handling */
777 struct mutex irq_lock;
778 int chip_irq;
779 int irq_base;
780 int irq_num;
781 u32 irq_mask;
782};
783
784struct tps65910_platform_data {
785 int irq;
786 int irq_base;
787};
788
789int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
790int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
791void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base);
792int tps65910_irq_init(struct tps65910 *tps65910, int irq,
793 struct tps65910_platform_data *pdata);
794
795static inline int tps65910_chip_id(struct tps65910 *tps65910)
796{
797 return tps65910->id;
798}
799
800#endif /* __LINUX_MFD_TPS65910_H */
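The new tps65910.h exposes board platform data plus register/bit helpers. A hedged example of board wiring; the IRQ/GPIO bases and the regulator constraints below are assumptions, not values from this patch:

    #include <linux/mfd/tps65910.h>
    #include <linux/regulator/machine.h>

    #define DEMO_PMIC_IRQ        100   /* hypothetical board interrupt  */
    #define DEMO_PMIC_IRQ_BASE   400   /* hypothetical virtual IRQ base */
    #define DEMO_PMIC_GPIO_BASE  200   /* hypothetical GPIO base        */

    static struct regulator_init_data demo_tps65910_regulators = {
            .constraints = {
                    .min_uV         = 600000,   /* assumed rail limits */
                    .max_uV         = 1500000,
                    .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
            },
    };

    static struct tps65910_board demo_tps65910_pdata = {
            .irq                     = DEMO_PMIC_IRQ,
            .irq_base                = DEMO_PMIC_IRQ_BASE,
            .gpio_base               = DEMO_PMIC_GPIO_BASE,
            .tps65910_pmic_init_data = &demo_tps65910_regulators,
    };

Drivers then poke individual bits through the helpers declared above, e.g. tps65910_set_bits(tps65910, TPS65910_PUADEN, PUADEN_PWRONP_MASK) to enable the PWRON pull-up.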
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fb8e814f78dc..9670f71d7be9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1408,17 +1408,11 @@ extern void exit_mmap(struct mm_struct *);
1408extern int mm_take_all_locks(struct mm_struct *mm); 1408extern int mm_take_all_locks(struct mm_struct *mm);
1409extern void mm_drop_all_locks(struct mm_struct *mm); 1409extern void mm_drop_all_locks(struct mm_struct *mm);
1410 1410
1411#ifdef CONFIG_PROC_FS
1412/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */ 1411/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
1413extern void added_exe_file_vma(struct mm_struct *mm); 1412extern void added_exe_file_vma(struct mm_struct *mm);
1414extern void removed_exe_file_vma(struct mm_struct *mm); 1413extern void removed_exe_file_vma(struct mm_struct *mm);
1415#else 1414extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
1416static inline void added_exe_file_vma(struct mm_struct *mm) 1415extern struct file *get_mm_exe_file(struct mm_struct *mm);
1417{}
1418
1419static inline void removed_exe_file_vma(struct mm_struct *mm)
1420{}
1421#endif /* CONFIG_PROC_FS */
1422 1416
1423extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); 1417extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
1424extern int install_special_mapping(struct mm_struct *mm, 1418extern int install_special_mapping(struct mm_struct *mm,
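With the CONFIG_PROC_FS guard gone, set_mm_exe_file()/get_mm_exe_file() are declared here unconditionally (their proc_fs.h declarations are removed further down). A sketch of a consumer resolving the executable behind a task's mm:

    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <linux/fs.h>

    static struct file *demo_task_exe_file(struct task_struct *tsk)
    {
            struct mm_struct *mm = get_task_mm(tsk);
            struct file *exe = NULL;

            if (mm) {
                    exe = get_mm_exe_file(mm);  /* takes a reference, or NULL */
                    mmput(mm);
            }
            return exe;  /* a non-NULL result must later be dropped with fput() */
    }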
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6fe96c19f85e..2a78aae78c69 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -302,11 +302,9 @@ struct mm_struct {
302 struct task_struct __rcu *owner; 302 struct task_struct __rcu *owner;
303#endif 303#endif
304 304
305#ifdef CONFIG_PROC_FS
306 /* store ref to file /proc/<pid>/exe symlink points to */ 305 /* store ref to file /proc/<pid>/exe symlink points to */
307 struct file *exe_file; 306 struct file *exe_file;
308 unsigned long num_exe_file_vmas; 307 unsigned long num_exe_file_vmas;
309#endif
310#ifdef CONFIG_MMU_NOTIFIER 308#ifdef CONFIG_MMU_NOTIFIER
311 struct mmu_notifier_mm *mmu_notifier_mm; 309 struct mmu_notifier_mm *mmu_notifier_mm;
312#endif 310#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 217bcf6bca77..c928dac6cad0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -273,11 +273,6 @@ struct zone_reclaim_stat {
273 */ 273 */
274 unsigned long recent_rotated[2]; 274 unsigned long recent_rotated[2];
275 unsigned long recent_scanned[2]; 275 unsigned long recent_scanned[2];
276
277 /*
278 * accumulated for batching
279 */
280 unsigned long nr_saved_scan[NR_LRU_LISTS];
281}; 276};
282 277
283struct zone { 278struct zone {
@@ -1056,12 +1051,14 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
1056 return __nr_to_section(pfn_to_section_nr(pfn)); 1051 return __nr_to_section(pfn_to_section_nr(pfn));
1057} 1052}
1058 1053
1054#ifndef CONFIG_HAVE_ARCH_PFN_VALID
1059static inline int pfn_valid(unsigned long pfn) 1055static inline int pfn_valid(unsigned long pfn)
1060{ 1056{
1061 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) 1057 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
1062 return 0; 1058 return 0;
1063 return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); 1059 return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
1064} 1060}
1061#endif
1065 1062
1066static inline int pfn_present(unsigned long pfn) 1063static inline int pfn_present(unsigned long pfn)
1067{ 1064{
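Wrapping the generic sparsemem pfn_valid() in CONFIG_HAVE_ARCH_PFN_VALID lets an architecture provide its own implementation. A hedged sketch of such an override, loosely modelled on a memblock-backed check; the arch Kconfig glue that selects the option is not shown:

    #include <linux/mm.h>
    #include <linux/memblock.h>
    #include <linux/module.h>

    /* compiled only when the architecture selects CONFIG_HAVE_ARCH_PFN_VALID */
    int pfn_valid(unsigned long pfn)
    {
            return memblock_is_memory((phys_addr_t)pfn << PAGE_SHIFT);
    }
    EXPORT_SYMBOL(pfn_valid);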
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9d5306bad117..2541fb848daa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -322,9 +322,12 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
322 322
323 /* Kernel-side ioctl definitions */ 323 /* Kernel-side ioctl definitions */
324 324
325extern int add_mtd_device(struct mtd_info *mtd); 325struct mtd_partition;
326extern int del_mtd_device (struct mtd_info *mtd);
327 326
327extern int mtd_device_register(struct mtd_info *master,
328 const struct mtd_partition *parts,
329 int nr_parts);
330extern int mtd_device_unregister(struct mtd_info *master);
328extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num); 331extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
329extern int __get_mtd_device(struct mtd_info *mtd); 332extern int __get_mtd_device(struct mtd_info *mtd);
330extern void __put_mtd_device(struct mtd_info *mtd); 333extern void __put_mtd_device(struct mtd_info *mtd);
@@ -348,15 +351,9 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
348int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs, 351int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
349 unsigned long count, loff_t from, size_t *retlen); 352 unsigned long count, loff_t from, size_t *retlen);
350 353
351#ifdef CONFIG_MTD_PARTITIONS 354void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
355
352void mtd_erase_callback(struct erase_info *instr); 356void mtd_erase_callback(struct erase_info *instr);
353#else
354static inline void mtd_erase_callback(struct erase_info *instr)
355{
356 if (instr->callback)
357 instr->callback(instr);
358}
359#endif
360 357
361/* 358/*
362 * Debugging macro and defines 359 * Debugging macro and defines
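add_mtd_device()/del_mtd_device() give way to mtd_device_register()/mtd_device_unregister(), which register the master and an optional partition table in one call. A sketch of a flash driver using the new API with an illustrative layout:

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>
    #include <linux/kernel.h>

    static struct mtd_partition demo_parts[] = {
            { .name = "bootloader", .offset = 0,          .size = 0x00040000 },
            { .name = "kernel",     .offset = 0x00040000, .size = 0x00400000 },
            { .name = "rootfs",     .offset = MTDPART_OFS_APPEND,
              .size = MTDPART_SIZ_FULL },
    };

    /* called once the driver has probed its struct mtd_info */
    static int demo_register_flash(struct mtd_info *mtd)
    {
            return mtd_device_register(mtd, demo_parts, ARRAY_SIZE(demo_parts));
    }

    static int demo_unregister_flash(struct mtd_info *mtd)
    {
            return mtd_device_unregister(mtd);
    }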
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index d44192740f6f..c2b9ac4fbc4a 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -237,9 +237,9 @@ typedef enum {
237 * If passed additionally to NAND_USE_FLASH_BBT then BBT code will not touch 237 * If passed additionally to NAND_USE_FLASH_BBT then BBT code will not touch
238 * the OOB area. 238 * the OOB area.
239 */ 239 */
240#define NAND_USE_FLASH_BBT_NO_OOB 0x00100000 240#define NAND_USE_FLASH_BBT_NO_OOB 0x00800000
241/* Create an empty BBT with no vendor information if the BBT is available */ 241/* Create an empty BBT with no vendor information if the BBT is available */
242#define NAND_CREATE_EMPTY_BBT 0x00200000 242#define NAND_CREATE_EMPTY_BBT 0x01000000
243 243
244/* Options set by nand scan */ 244/* Options set by nand scan */
245/* Nand scan has allocated controller struct */ 245/* Nand scan has allocated controller struct */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 4a0a8ba90a72..3a6f0372fc96 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -16,7 +16,7 @@
16 * Partition definition structure: 16 * Partition definition structure:
17 * 17 *
18 * An array of struct partition is passed along with a MTD object to 18 * An array of struct partition is passed along with a MTD object to
19 * add_mtd_partitions() to create them. 19 * mtd_device_register() to create them.
20 * 20 *
21 * For each partition, these fields are available: 21 * For each partition, these fields are available:
22 * name: string that will be used to label the partition's MTD device. 22 * name: string that will be used to label the partition's MTD device.
@@ -49,9 +49,6 @@ struct mtd_partition {
49 49
50struct mtd_info; 50struct mtd_info;
51 51
52int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
53int del_mtd_partitions(struct mtd_info *);
54
55/* 52/*
56 * Functions dealing with the various ways of partitioning the space 53 * Functions dealing with the various ways of partitioning the space
57 */ 54 */
@@ -73,14 +70,17 @@ extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
73struct device; 70struct device;
74struct device_node; 71struct device_node;
75 72
73#ifdef CONFIG_MTD_OF_PARTS
76int __devinit of_mtd_parse_partitions(struct device *dev, 74int __devinit of_mtd_parse_partitions(struct device *dev,
77 struct device_node *node, 75 struct device_node *node,
78 struct mtd_partition **pparts); 76 struct mtd_partition **pparts);
79
80#ifdef CONFIG_MTD_PARTITIONS
81static inline int mtd_has_partitions(void) { return 1; }
82#else 77#else
83static inline int mtd_has_partitions(void) { return 0; } 78static inline int of_mtd_parse_partitions(struct device *dev,
79 struct device_node *node,
80 struct mtd_partition **pparts)
81{
82 return 0;
83}
84#endif 84#endif
85 85
86#ifdef CONFIG_MTD_CMDLINE_PARTS 86#ifdef CONFIG_MTD_CMDLINE_PARTS
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index 49b959029417..d40bfa1d9c91 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -37,8 +37,6 @@ struct physmap_flash_data {
37void physmap_configure(unsigned long addr, unsigned long size, 37void physmap_configure(unsigned long addr, unsigned long size,
38 int bankwidth, void (*set_vpp)(struct map_info *, int) ); 38 int bankwidth, void (*set_vpp)(struct map_info *, int) );
39 39
40#ifdef CONFIG_MTD_PARTITIONS
41
42/* 40/*
43 * Machines that wish to do flash partition may want to call this function in 41 * Machines that wish to do flash partition may want to call this function in
44 * their setup routine. 42 * their setup routine.
@@ -50,6 +48,4 @@ void physmap_configure(unsigned long addr, unsigned long size,
50 */ 48 */
51void physmap_set_partitions(struct mtd_partition *parts, int num_parts); 49void physmap_set_partitions(struct mtd_partition *parts, int num_parts);
52 50
53#endif /* defined(CONFIG_MTD_PARTITIONS) */
54
55#endif /* __LINUX_MTD_PHYSMAP__ */ 51#endif /* __LINUX_MTD_PHYSMAP__ */
diff --git a/include/linux/net.h b/include/linux/net.h
index 1da55e9b6f01..b29923006b11 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -289,11 +289,5 @@ extern int kernel_sock_shutdown(struct socket *sock,
289 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \ 289 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
290 "-type-" __stringify(type)) 290 "-type-" __stringify(type))
291 291
292#ifdef CONFIG_SYSCTL
293#include <linux/sysctl.h>
294#include <linux/ratelimit.h>
295extern struct ratelimit_state net_ratelimit_state;
296#endif
297
298#endif /* __KERNEL__ */ 292#endif /* __KERNEL__ */
299#endif /* _LINUX_NET_H */ 293#endif /* _LINUX_NET_H */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 7fa95df60146..857f5026ced6 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -13,6 +13,7 @@
13#endif 13#endif
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <linux/sysctl.h>
16 17
17/* Responses from hook functions. */ 18/* Responses from hook functions. */
18#define NF_DROP 0 19#define NF_DROP 0
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index a0196ac79051..ac3c822eb39a 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -839,7 +839,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
839 struct htable *t = h->table; 839 struct htable *t = h->table;
840 const struct type_pf_elem *d = value; 840 const struct type_pf_elem *d = value;
841 struct hbucket *n; 841 struct hbucket *n;
842 int i, ret = 0; 842 int i;
843 struct type_pf_elem *data; 843 struct type_pf_elem *data;
844 u32 key; 844 u32 key;
845 845
@@ -850,7 +850,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
850 if (!type_pf_data_equal(data, d)) 850 if (!type_pf_data_equal(data, d))
851 continue; 851 continue;
852 if (type_pf_data_expired(data)) 852 if (type_pf_data_expired(data))
853 ret = -IPSET_ERR_EXIST; 853 return -IPSET_ERR_EXIST;
854 if (i != n->pos - 1) 854 if (i != n->pos - 1)
855 /* Not last one */ 855 /* Not last one */
856 type_pf_data_copy(data, ahash_tdata(n, n->pos - 1)); 856 type_pf_data_copy(data, ahash_tdata(n, n->pos - 1));
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 9f30c5f2ec1c..bcdd40ad39ed 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -45,7 +45,7 @@ ip_set_timeout_test(unsigned long timeout)
45{ 45{
46 return timeout != IPSET_ELEM_UNSET && 46 return timeout != IPSET_ELEM_UNSET &&
47 (timeout == IPSET_ELEM_PERMANENT || 47 (timeout == IPSET_ELEM_PERMANENT ||
48 time_after(timeout, jiffies)); 48 time_is_after_jiffies(timeout));
49} 49}
50 50
51static inline bool 51static inline bool
@@ -53,7 +53,7 @@ ip_set_timeout_expired(unsigned long timeout)
53{ 53{
54 return timeout != IPSET_ELEM_UNSET && 54 return timeout != IPSET_ELEM_UNSET &&
55 timeout != IPSET_ELEM_PERMANENT && 55 timeout != IPSET_ELEM_PERMANENT &&
56 time_before(timeout, jiffies); 56 time_is_before_jiffies(timeout);
57} 57}
58 58
59static inline unsigned long 59static inline unsigned long
@@ -64,7 +64,7 @@ ip_set_timeout_set(u32 timeout)
64 if (!timeout) 64 if (!timeout)
65 return IPSET_ELEM_PERMANENT; 65 return IPSET_ELEM_PERMANENT;
66 66
67 t = timeout * HZ + jiffies; 67 t = msecs_to_jiffies(timeout * 1000) + jiffies;
68 if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT) 68 if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
69 /* Bingo! */ 69 /* Bingo! */
70 t++; 70 t++;
@@ -75,7 +75,8 @@ ip_set_timeout_set(u32 timeout)
75static inline u32 75static inline u32
76ip_set_timeout_get(unsigned long timeout) 76ip_set_timeout_get(unsigned long timeout)
77{ 77{
78 return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ; 78 return timeout == IPSET_ELEM_PERMANENT ? 0 :
79 jiffies_to_msecs(timeout - jiffies)/1000;
79} 80}
80 81
81#else 82#else
@@ -89,14 +90,14 @@ static inline bool
89ip_set_timeout_test(unsigned long timeout) 90ip_set_timeout_test(unsigned long timeout)
90{ 91{
91 return timeout == IPSET_ELEM_PERMANENT || 92 return timeout == IPSET_ELEM_PERMANENT ||
92 time_after(timeout, jiffies); 93 time_is_after_jiffies(timeout);
93} 94}
94 95
95static inline bool 96static inline bool
96ip_set_timeout_expired(unsigned long timeout) 97ip_set_timeout_expired(unsigned long timeout)
97{ 98{
98 return timeout != IPSET_ELEM_PERMANENT && 99 return timeout != IPSET_ELEM_PERMANENT &&
99 time_before(timeout, jiffies); 100 time_is_before_jiffies(timeout);
100} 101}
101 102
102static inline unsigned long 103static inline unsigned long
@@ -107,7 +108,7 @@ ip_set_timeout_set(u32 timeout)
107 if (!timeout) 108 if (!timeout)
108 return IPSET_ELEM_PERMANENT; 109 return IPSET_ELEM_PERMANENT;
109 110
110 t = timeout * HZ + jiffies; 111 t = msecs_to_jiffies(timeout * 1000) + jiffies;
111 if (t == IPSET_ELEM_PERMANENT) 112 if (t == IPSET_ELEM_PERMANENT)
112 /* Bingo! :-) */ 113 /* Bingo! :-) */
113 t++; 114 t++;
@@ -118,7 +119,8 @@ ip_set_timeout_set(u32 timeout)
118static inline u32 119static inline u32
119ip_set_timeout_get(unsigned long timeout) 120ip_set_timeout_get(unsigned long timeout)
120{ 121{
121 return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ; 122 return timeout == IPSET_ELEM_PERMANENT ? 0 :
123 jiffies_to_msecs(timeout - jiffies)/1000;
122} 124}
123#endif /* ! IP_SET_BITMAP_TIMEOUT */ 125#endif /* ! IP_SET_BITMAP_TIMEOUT */
124 126
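The timeout helpers drop open-coded time_after()/time_before() and HZ arithmetic in favour of time_is_after_jiffies()/time_is_before_jiffies() plus msecs_to_jiffies()/jiffies_to_msecs(), which stay correct across HZ values and jiffies wrap-around. The same pattern in isolation (illustrative function names):

    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* absolute deadline 'seconds' from now */
    static unsigned long demo_deadline_set(u32 seconds)
    {
            return jiffies + msecs_to_jiffies(seconds * 1000);
    }

    /* true while the deadline is still in the future, wrap-safe */
    static bool demo_deadline_pending(unsigned long deadline)
    {
            return time_is_after_jiffies(deadline);
    }

    /* remaining time in whole seconds, 0 once expired */
    static u32 demo_deadline_remaining(unsigned long deadline)
    {
            if (time_is_before_jiffies(deadline))
                    return 0;
            return jiffies_to_msecs(deadline - jiffies) / 1000;
    }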
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 7b370c7cfeff..50d20aba57d3 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -81,13 +81,4 @@ static inline void get_nsproxy(struct nsproxy *ns)
81 atomic_inc(&ns->count); 81 atomic_inc(&ns->count);
82} 82}
83 83
84#ifdef CONFIG_CGROUP_NS
85int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid);
86#else
87static inline int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid)
88{
89 return 0;
90}
91#endif
92
93#endif 84#endif
diff --git a/include/linux/pid.h b/include/linux/pid.h
index cdced84261d7..b152d44fb181 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -105,7 +105,7 @@ extern struct pid_namespace init_pid_ns;
105 * or rcu_read_lock() held. 105 * or rcu_read_lock() held.
106 * 106 *
107 * find_pid_ns() finds the pid in the namespace specified 107 * find_pid_ns() finds the pid in the namespace specified
108 * find_vpid() finr the pid by its virtual id, i.e. in the current namespace 108 * find_vpid() finds the pid by its virtual id, i.e. in the current namespace
109 * 109 *
110 * see also find_task_by_vpid() set in include/linux/sched.h 110 * see also find_task_by_vpid() set in include/linux/sched.h
111 */ 111 */
diff --git a/arch/arm/mach-s5p6442/include/mach/dma.h b/include/linux/power/isp1704_charger.h
index 81209eb1409b..68096a6aa2d7 100644
--- a/arch/arm/mach-s5p6442/include/mach/dma.h
+++ b/include/linux/power/isp1704_charger.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (C) 2010 Samsung Electronics Co. Ltd. 2 * ISP1704 USB Charger Detection driver
3 * Jaswinder Singh <jassi.brar@samsung.com> 3 *
4 * Copyright (C) 2011 Nokia Corporation
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -14,13 +15,15 @@
14 * 15 *
15 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 19 */
19 20
20#ifndef __MACH_DMA_H
21#define __MACH_DMA_H
22 21
23/* This platform uses the common S3C DMA API driver for PL330 */ 22#ifndef __ISP1704_CHARGER_H
24#include <plat/s3c-dma-pl330.h> 23#define __ISP1704_CHARGER_H
24
25struct isp1704_charger_data {
26 void (*set_power)(bool on);
27};
25 28
26#endif /* __MACH_DMA_H */ 29#endif
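The new isp1704_charger.h carries a single board callback for powering the detection logic. A hedged board-side example; the GPIO number and the platform device name are assumptions about how a machine file might wire it:

    #include <linux/power/isp1704_charger.h>
    #include <linux/platform_device.h>
    #include <linux/gpio.h>

    #define DEMO_ISP1704_EN_GPIO  67  /* hypothetical enable line */

    static void demo_isp1704_set_power(bool on)
    {
            gpio_set_value(DEMO_ISP1704_EN_GPIO, on);
    }

    static struct isp1704_charger_data demo_isp1704_pdata = {
            .set_power = demo_isp1704_set_power,
    };

    static struct platform_device demo_isp1704_device = {
            .name = "isp1704_charger",  /* assumed to match the driver name */
            .id   = -1,
            .dev  = {
                    .platform_data = &demo_isp1704_pdata,
            },
    };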
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h
new file mode 100644
index 000000000000..24f51db8a83f
--- /dev/null
+++ b/include/linux/power/max8903_charger.h
@@ -0,0 +1,57 @@
1/*
2 * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef __MAX8903_CHARGER_H__
24#define __MAX8903_CHARGER_H__
25
26struct max8903_pdata {
27 /*
28 * GPIOs
29 * cen, chg, flt, and usus are optional.
30 * dok, dcm, and uok are not optional depending on the status of
31 * dc_valid and usb_valid.
32 */
33 int cen; /* Charger Enable input */
34 int dok; /* DC(Adapter) Power OK output */
35 int uok; /* USB Power OK output */
36 int chg; /* Charger status output */
37 int flt; /* Fault output */
38 int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */
39 int usus; /* USB Suspend Input (1: suspended) */
40
41 /*
42 * DC(Adapter/TA) is wired
43 * When dc_valid is true,
44 * dok and dcm should be valid.
45 *
46 * At least one of dc_valid or usb_valid should be true.
47 */
48 bool dc_valid;
49 /*
50 * USB is wired
51 * When usb_valid is true,
52 * uok should be valid.
53 */
54 bool usb_valid;
55};
56
57#endif /* __MAX8903_CHARGER_H__ */
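struct max8903_pdata describes which MAX8903 pins the board wired up; the comments above spell out which GPIOs become mandatory once dc_valid or usb_valid is set. A hedged example for a DC-adapter-only board (all GPIO numbers and the device name are assumptions):

    #include <linux/power/max8903_charger.h>
    #include <linux/platform_device.h>

    static struct max8903_pdata demo_max8903_pdata = {
            .dok       = 40,   /* DC power OK - required because dc_valid   */
            .dcm       = 41,   /* current-limit mode select                 */
            .cen       = 42,   /* charger enable (optional)                 */
            .chg       = 43,   /* charging status (optional)                */
            .flt       = 44,   /* fault output (optional)                   */
            .uok       = -1,   /* USB path not wired on this board          */
            .usus      = -1,
            .dc_valid  = true,
            .usb_valid = false,
    };

    static struct platform_device demo_max8903_device = {
            .name = "max8903-charger",  /* assumed to match the driver name */
            .id   = -1,
            .dev  = {
                    .platform_data = &demo_max8903_pdata,
            },
    };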
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 648c9c58add7..e7576cf9e32d 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -173,12 +173,6 @@ extern void proc_net_remove(struct net *net, const char *name);
173extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, 173extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
174 struct proc_dir_entry *parent); 174 struct proc_dir_entry *parent);
175 175
176/* While the {get|set|dup}_mm_exe_file functions are for mm_structs, they are
177 * only needed to implement /proc/<pid>|self/exe so we define them here. */
178extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
179extern struct file *get_mm_exe_file(struct mm_struct *mm);
180extern void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm);
181
182extern struct file *proc_ns_fget(int fd); 176extern struct file *proc_ns_fget(int fd);
183 177
184#else 178#else
@@ -230,19 +224,6 @@ static inline void pid_ns_release_proc(struct pid_namespace *ns)
230{ 224{
231} 225}
232 226
233static inline void set_mm_exe_file(struct mm_struct *mm,
234 struct file *new_exe_file)
235{}
236
237static inline struct file *get_mm_exe_file(struct mm_struct *mm)
238{
239 return NULL;
240}
241
242static inline void dup_mm_exe_file(struct mm_struct *oldmm,
243 struct mm_struct *newmm)
244{}
245
246static inline struct file *proc_ns_fget(int fd) 227static inline struct file *proc_ns_fget(int fd)
247{ 228{
248 return ERR_PTR(-EINVAL); 229 return ERR_PTR(-EINVAL);
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 03ff67b0cdf5..2f007157fab9 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -41,4 +41,44 @@ extern struct ratelimit_state printk_ratelimit_state;
41extern int ___ratelimit(struct ratelimit_state *rs, const char *func); 41extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
42#define __ratelimit(state) ___ratelimit(state, __func__) 42#define __ratelimit(state) ___ratelimit(state, __func__)
43 43
44#ifdef CONFIG_PRINTK
45
46#define WARN_ON_RATELIMIT(condition, state) \
47 WARN_ON((condition) && __ratelimit(state))
48
49#define __WARN_RATELIMIT(condition, state, format...) \
50({ \
51 int rtn = 0; \
52 if (unlikely(__ratelimit(state))) \
53 rtn = WARN(condition, format); \
54 rtn; \
55})
56
57#define WARN_RATELIMIT(condition, format...) \
58({ \
59 static DEFINE_RATELIMIT_STATE(_rs, \
60 DEFAULT_RATELIMIT_INTERVAL, \
61 DEFAULT_RATELIMIT_BURST); \
62 __WARN_RATELIMIT(condition, &_rs, format); \
63})
64
65#else
66
67#define WARN_ON_RATELIMIT(condition, state) \
68 WARN_ON(condition)
69
70#define __WARN_RATELIMIT(condition, state, format...) \
71({ \
72 int rtn = WARN(condition, format); \
73 rtn; \
74})
75
76#define WARN_RATELIMIT(condition, format...) \
77({ \
78 int rtn = WARN(condition, format); \
79 rtn; \
80})
81
82#endif
83
44#endif /* _LINUX_RATELIMIT_H */ 84#endif /* _LINUX_RATELIMIT_H */
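
Usage sketch for the new helpers (illustrative only; example_check and its
conditions are made up): WARN_RATELIMIT() carries its own static ratelimit
state, while WARN_ON_RATELIMIT() shares one supplied by the caller.

#include <linux/kernel.h>
#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(example_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void example_check(int status)
{
	/* shared state: at most DEFAULT_RATELIMIT_BURST warnings per interval */
	WARN_ON_RATELIMIT(status < 0, &example_rs);

	/* self-contained variant: the macro defines its own static state */
	WARN_RATELIMIT(status > 255, "status out of range: %d\n", status);
}
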
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index c4c4fc45f856..ce3127a75c88 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -68,6 +68,8 @@ struct regulator_state {
68 * 68 *
69 * @min_uV: Smallest voltage consumers may set. 69 * @min_uV: Smallest voltage consumers may set.
70 * @max_uV: Largest voltage consumers may set. 70 * @max_uV: Largest voltage consumers may set.
71 * @uV_offset: Offset applied to voltages from consumer to compensate for
72 * voltage drops.
71 * 73 *
 72 * @min_uA: Smallest current consumers may set. 74 * @min_uA: Smallest current consumers may set.
73 * @max_uA: Largest current consumers may set. 75 * @max_uA: Largest current consumers may set.
@@ -99,6 +101,8 @@ struct regulation_constraints {
99 int min_uV; 101 int min_uV;
100 int max_uV; 102 int max_uV;
101 103
104 int uV_offset;
105
102 /* current output range (inclusive) - for current control */ 106 /* current output range (inclusive) - for current control */
103 int min_uA; 107 int min_uA;
104 int max_uA; 108 int max_uA;
@@ -160,8 +164,6 @@ struct regulator_consumer_supply {
160 * @supply_regulator: Parent regulator. Specified using the regulator name 164 * @supply_regulator: Parent regulator. Specified using the regulator name
161 * as it appears in the name field in sysfs, which can 165 * as it appears in the name field in sysfs, which can
162 * be explicitly set using the constraints field 'name'. 166 * be explicitly set using the constraints field 'name'.
163 * @supply_regulator_dev: Parent regulator (if any) - DEPRECATED in favour
164 * of supply_regulator.
165 * 167 *
166 * @constraints: Constraints. These must be specified for the regulator to 168 * @constraints: Constraints. These must be specified for the regulator to
167 * be usable. 169 * be usable.
@@ -173,7 +175,6 @@ struct regulator_consumer_supply {
173 */ 175 */
174struct regulator_init_data { 176struct regulator_init_data {
175 const char *supply_regulator; /* or NULL for system supply */ 177 const char *supply_regulator; /* or NULL for system supply */
176 struct device *supply_regulator_dev; /* or NULL for system supply */
177 178
178 struct regulation_constraints constraints; 179 struct regulation_constraints constraints;
179 180
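
Board-level sketch of init data using the new uV_offset field together with the
remaining supply_regulator name field (illustrative only; the supply name and
all values are assumptions).

#include <linux/regulator/machine.h>

static struct regulator_init_data example_ldo_init = {
	.supply_regulator = "vbat",	/* parent regulator, by its sysfs name */
	.constraints = {
		.min_uV    = 1800000,
		.max_uV    = 3300000,
		.uV_offset = 50000,	/* compensate ~50 mV of board-level drop */
	},
};
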
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 877ece45426f..b27ebea25660 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -92,10 +92,10 @@ struct rtc_pll_info {
92#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ 92#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
93 93
94/* interrupt flags */ 94/* interrupt flags */
95#define RTC_IRQF 0x80 /* any of the following is active */ 95#define RTC_IRQF 0x80 /* Any of the following is active */
96#define RTC_PF 0x40 96#define RTC_PF 0x40 /* Periodic interrupt */
97#define RTC_AF 0x20 97#define RTC_AF 0x20 /* Alarm interrupt */
98#define RTC_UF 0x10 98#define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */
99 99
100#ifdef __KERNEL__ 100#ifdef __KERNEL__
101 101
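
For context, a hedged user-space sketch of how these flag bits are typically
consumed: a read() on the rtc character device conventionally returns an
unsigned long whose low byte holds the interrupt flags and whose remaining
bytes count interrupts (device node name assumed; update/alarm interrupts must
already have been enabled through the RTC ioctls).

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0 || read(fd, &data, sizeof(data)) != sizeof(data))
		return 1;
	if (data & RTC_UF)
		printf("update (1Hz) interrupt, %lu event(s)\n", data >> 8);
	if (data & RTC_AF)
		printf("alarm interrupt\n");
	if (data & RTC_PF)
		printf("periodic interrupt\n");
	close(fd);
	return 0;
}
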
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f18300eddfcb..dc8871295a5a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -513,6 +513,7 @@ struct thread_group_cputimer {
513 spinlock_t lock; 513 spinlock_t lock;
514}; 514};
515 515
516#include <linux/rwsem.h>
516struct autogroup; 517struct autogroup;
517 518
518/* 519/*
@@ -632,6 +633,16 @@ struct signal_struct {
632 unsigned audit_tty; 633 unsigned audit_tty;
633 struct tty_audit_buf *tty_audit_buf; 634 struct tty_audit_buf *tty_audit_buf;
634#endif 635#endif
636#ifdef CONFIG_CGROUPS
637 /*
638 * The threadgroup_fork_lock prevents threads from forking with
639 * CLONE_THREAD while held for writing. Use this for fork-sensitive
640 * threadgroup-wide operations. It's taken for reading in fork.c in
641 * copy_process().
642 * Currently only needed write-side by cgroups.
643 */
644 struct rw_semaphore threadgroup_fork_lock;
645#endif
635 646
636 int oom_adj; /* OOM kill score adjustment (bit shift) */ 647 int oom_adj; /* OOM kill score adjustment (bit shift) */
637 int oom_score_adj; /* OOM kill score adjustment */ 648 int oom_score_adj; /* OOM kill score adjustment */
@@ -2323,6 +2334,31 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
2323 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); 2334 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2324} 2335}
2325 2336
2337/* See the declaration of threadgroup_fork_lock in signal_struct. */
2338#ifdef CONFIG_CGROUPS
2339static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
2340{
2341 down_read(&tsk->signal->threadgroup_fork_lock);
2342}
2343static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
2344{
2345 up_read(&tsk->signal->threadgroup_fork_lock);
2346}
2347static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
2348{
2349 down_write(&tsk->signal->threadgroup_fork_lock);
2350}
2351static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
2352{
2353 up_write(&tsk->signal->threadgroup_fork_lock);
2354}
2355#else
2356static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
2357static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
2358static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
2359static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
2360#endif
2361
2326#ifndef __HAVE_THREAD_FUNCTIONS 2362#ifndef __HAVE_THREAD_FUNCTIONS
2327 2363
2328#define task_thread_info(task) ((struct thread_info *)(task)->stack) 2364#define task_thread_info(task) ((struct thread_info *)(task)->stack)
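
A sketch of the write-side pattern these helpers are meant for, mirroring the
cgroup attach path later in this patch; example_operate_on_threadgroup is
hypothetical.

#include <linux/sched.h>

static int example_operate_on_threadgroup(struct task_struct *leader)
{
	int ret = 0;

	/* no thread of 'leader' can fork with CLONE_THREAD while this is held */
	threadgroup_fork_write_lock(leader);
	/* ... walk or modify every thread in the group here ... */
	threadgroup_fork_write_unlock(leader);
	return ret;
}
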
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a5c6da5d8df8..384eb5fe530b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -257,7 +257,8 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
257extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 257extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
258 gfp_t gfp_mask, bool noswap, 258 gfp_t gfp_mask, bool noswap,
259 unsigned int swappiness, 259 unsigned int swappiness,
260 struct zone *zone); 260 struct zone *zone,
261 unsigned long *nr_scanned);
261extern int __isolate_lru_page(struct page *page, int mode, int file); 262extern int __isolate_lru_page(struct page *page, int mode, int file);
262extern unsigned long shrink_all_memory(unsigned long nr_pages); 263extern unsigned long shrink_all_memory(unsigned long nr_pages);
263extern int vm_swappiness; 264extern int vm_swappiness;
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
new file mode 100644
index 000000000000..03b90cdc1921
--- /dev/null
+++ b/include/linux/vm_event_item.h
@@ -0,0 +1,64 @@
1#ifndef VM_EVENT_ITEM_H_INCLUDED
2#define VM_EVENT_ITEM_H_INCLUDED
3
4#ifdef CONFIG_ZONE_DMA
5#define DMA_ZONE(xx) xx##_DMA,
6#else
7#define DMA_ZONE(xx)
8#endif
9
10#ifdef CONFIG_ZONE_DMA32
11#define DMA32_ZONE(xx) xx##_DMA32,
12#else
13#define DMA32_ZONE(xx)
14#endif
15
16#ifdef CONFIG_HIGHMEM
17#define HIGHMEM_ZONE(xx) , xx##_HIGH
18#else
19#define HIGHMEM_ZONE(xx)
20#endif
21
22#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
23
24enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
25 FOR_ALL_ZONES(PGALLOC),
26 PGFREE, PGACTIVATE, PGDEACTIVATE,
27 PGFAULT, PGMAJFAULT,
28 FOR_ALL_ZONES(PGREFILL),
29 FOR_ALL_ZONES(PGSTEAL),
30 FOR_ALL_ZONES(PGSCAN_KSWAPD),
31 FOR_ALL_ZONES(PGSCAN_DIRECT),
32#ifdef CONFIG_NUMA
33 PGSCAN_ZONE_RECLAIM_FAILED,
34#endif
35 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
36 KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
37 KSWAPD_SKIP_CONGESTION_WAIT,
38 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
39#ifdef CONFIG_COMPACTION
40 COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
41 COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
42#endif
43#ifdef CONFIG_HUGETLB_PAGE
44 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
45#endif
46 UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
47 UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
48 UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
49 UNEVICTABLE_PGMLOCKED,
50 UNEVICTABLE_PGMUNLOCKED,
51 UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
52 UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
53 UNEVICTABLE_MLOCKFREED,
54#ifdef CONFIG_TRANSPARENT_HUGEPAGE
55 THP_FAULT_ALLOC,
56 THP_FAULT_FALLBACK,
57 THP_COLLAPSE_ALLOC,
58 THP_COLLAPSE_ALLOC_FAILED,
59 THP_SPLIT,
60#endif
61 NR_VM_EVENT_ITEMS
62};
63
64#endif /* VM_EVENT_ITEM_H_INCLUDED */
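
These enum values feed the VM event counters; a one-line sketch, assuming the
count_vm_event() helper declared in linux/vmstat.h (not shown in this hunk).

#include <linux/vmstat.h>

static void example_note_major_fault(void)
{
	count_vm_event(PGMAJFAULT);	/* bump the per-cpu PGMAJFAULT counter */
}
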
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 51359837511a..bcd942fa611c 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -5,69 +5,9 @@
5#include <linux/percpu.h> 5#include <linux/percpu.h>
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/mmzone.h> 7#include <linux/mmzone.h>
8#include <linux/vm_event_item.h>
8#include <asm/atomic.h> 9#include <asm/atomic.h>
9 10
10#ifdef CONFIG_ZONE_DMA
11#define DMA_ZONE(xx) xx##_DMA,
12#else
13#define DMA_ZONE(xx)
14#endif
15
16#ifdef CONFIG_ZONE_DMA32
17#define DMA32_ZONE(xx) xx##_DMA32,
18#else
19#define DMA32_ZONE(xx)
20#endif
21
22#ifdef CONFIG_HIGHMEM
23#define HIGHMEM_ZONE(xx) , xx##_HIGH
24#else
25#define HIGHMEM_ZONE(xx)
26#endif
27
28
29#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
30
31enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
32 FOR_ALL_ZONES(PGALLOC),
33 PGFREE, PGACTIVATE, PGDEACTIVATE,
34 PGFAULT, PGMAJFAULT,
35 FOR_ALL_ZONES(PGREFILL),
36 FOR_ALL_ZONES(PGSTEAL),
37 FOR_ALL_ZONES(PGSCAN_KSWAPD),
38 FOR_ALL_ZONES(PGSCAN_DIRECT),
39#ifdef CONFIG_NUMA
40 PGSCAN_ZONE_RECLAIM_FAILED,
41#endif
42 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
43 KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
44 KSWAPD_SKIP_CONGESTION_WAIT,
45 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
46#ifdef CONFIG_COMPACTION
47 COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
48 COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
49#endif
50#ifdef CONFIG_HUGETLB_PAGE
51 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
52#endif
53 UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
54 UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
55 UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
56 UNEVICTABLE_PGMLOCKED,
57 UNEVICTABLE_PGMUNLOCKED,
58 UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
59 UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
60 UNEVICTABLE_MLOCKFREED,
61#ifdef CONFIG_TRANSPARENT_HUGEPAGE
62 THP_FAULT_ALLOC,
63 THP_FAULT_FALLBACK,
64 THP_COLLAPSE_ALLOC,
65 THP_COLLAPSE_ALLOC_FAILED,
66 THP_SPLIT,
67#endif
68 NR_VM_EVENT_ITEMS
69};
70
71extern int sysctl_stat_interval; 11extern int sysctl_stat_interval;
72 12
73#ifdef CONFIG_VM_EVENT_COUNTERS 13#ifdef CONFIG_VM_EVENT_COUNTERS
diff --git a/include/media/m5mols.h b/include/media/m5mols.h
new file mode 100644
index 000000000000..2d7e7ca2313d
--- /dev/null
+++ b/include/media/m5mols.h
@@ -0,0 +1,35 @@
1/*
2 * Driver header for M-5MOLS 8M Pixel camera sensor with ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef MEDIA_M5MOLS_H
17#define MEDIA_M5MOLS_H
18
19/**
20 * struct m5mols_platform_data - platform data for M-5MOLS driver
 21 * @irq: GPIO connected to the interrupt pin of M-5MOLS
22 * @gpio_reset: GPIO driving the reset pin of M-5MOLS
 23 * @reset_polarity: active state for the gpio_reset pin, 0 or 1
24 * @set_power: an additional callback to the board setup code
25 * to be called after enabling and before disabling
26 * the sensor's supply regulators
27 */
28struct m5mols_platform_data {
29 int irq;
30 int gpio_reset;
31 u8 reset_polarity;
32 int (*set_power)(struct device *dev, int on);
33};
34
35#endif /* MEDIA_M5MOLS_H */
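
Hedged board sketch for this platform data; the GPIO/IRQ numbers, the
active-low reset assumption and the power callback body are made up.

#include <linux/device.h>
#include <media/m5mols.h>

static int example_m5mols_power(struct device *dev, int on)
{
	/* switch the sensor's supply regulators / board power rail here */
	return 0;
}

static struct m5mols_platform_data example_m5mols_pdata = {
	.irq		= 123,	/* GPIO wired to the sensor interrupt pin */
	.gpio_reset	= 124,	/* GPIO driving the reset pin */
	.reset_polarity	= 0,	/* reset asserted low on this board */
	.set_power	= example_m5mols_power,
};
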
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
index 07cf4b9d0a65..bf365721d6b0 100644
--- a/include/media/videobuf-dvb.h
+++ b/include/media/videobuf-dvb.h
@@ -4,6 +4,9 @@
4#include <dvb_net.h> 4#include <dvb_net.h>
5#include <dvb_frontend.h> 5#include <dvb_frontend.h>
6 6
7#ifndef _VIDEOBUF_DVB_H_
8#define _VIDEOBUF_DVB_H_
9
7struct videobuf_dvb { 10struct videobuf_dvb {
 8 /* filled in by the driver */ 11 /* filled in by the driver */
9 char *name; 12 char *name;
@@ -54,6 +57,7 @@ void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f);
54struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id); 57struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id);
55int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p); 58int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p);
56 59
60#endif /* _VIDEOBUF_DVB_H_ */
57 61
58/* 62/*
59 * Local variables: 63 * Local variables:
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 4fff432aeade..481f856c650f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -797,7 +797,8 @@ struct netns_ipvs {
797 struct list_head rs_table[IP_VS_RTAB_SIZE]; 797 struct list_head rs_table[IP_VS_RTAB_SIZE];
798 /* ip_vs_app */ 798 /* ip_vs_app */
799 struct list_head app_list; 799 struct list_head app_list;
800 800 /* ip_vs_ftp */
801 struct ip_vs_app *ftp_app;
801 /* ip_vs_proto */ 802 /* ip_vs_proto */
802 #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */ 803 #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
803 struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE]; 804 struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index dcc8f5749d3f..2bf9ed9ef26b 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -7,6 +7,7 @@
7#include <asm/atomic.h> 7#include <asm/atomic.h>
8#include <linux/workqueue.h> 8#include <linux/workqueue.h>
9#include <linux/list.h> 9#include <linux/list.h>
10#include <linux/sysctl.h>
10 11
11#include <net/netns/core.h> 12#include <net/netns/core.h>
12#include <net/netns/mib.h> 13#include <net/netns/mib.h>
diff --git a/include/net/net_ratelimit.h b/include/net/net_ratelimit.h
new file mode 100644
index 000000000000..7727b4247daf
--- /dev/null
+++ b/include/net/net_ratelimit.h
@@ -0,0 +1,8 @@
1#ifndef _LINUX_NET_RATELIMIT_H
2#define _LINUX_NET_RATELIMIT_H
3
4#include <linux/ratelimit.h>
5
6extern struct ratelimit_state net_ratelimit_state;
7
8#endif /* _LINUX_NET_RATELIMIT_H */
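
A sketch pairing this state with the __ratelimit() helper from
linux/ratelimit.h; example_drop_warn is hypothetical (the usual
net_ratelimit() wrapper is declared elsewhere, not in this header).

#include <linux/kernel.h>
#include <net/net_ratelimit.h>

static void example_drop_warn(int reason)
{
	if (__ratelimit(&net_ratelimit_state))
		printk(KERN_WARNING "dropping packet, reason %d\n", reason);
}
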
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 8f6bb9c7f3eb..ee866060f8a4 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -604,6 +604,7 @@ struct sas_domain_function_template {
604 int (*lldd_clear_aca)(struct domain_device *, u8 *lun); 604 int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
605 int (*lldd_clear_task_set)(struct domain_device *, u8 *lun); 605 int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
606 int (*lldd_I_T_nexus_reset)(struct domain_device *); 606 int (*lldd_I_T_nexus_reset)(struct domain_device *);
607 int (*lldd_ata_soft_reset)(struct domain_device *);
607 int (*lldd_lu_reset)(struct domain_device *, u8 *lun); 608 int (*lldd_lu_reset)(struct domain_device *, u8 *lun);
608 int (*lldd_query_task)(struct sas_task *); 609 int (*lldd_query_task)(struct sas_task *);
609 610
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index d6e7994aa634..81dd12edc38c 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -9,6 +9,7 @@
9#define MSG_SIMPLE_TAG 0x20 9#define MSG_SIMPLE_TAG 0x20
10#define MSG_HEAD_TAG 0x21 10#define MSG_HEAD_TAG 0x21
11#define MSG_ORDERED_TAG 0x22 11#define MSG_ORDERED_TAG 0x22
12#define MSG_ACA_TAG 0x24 /* unsupported */
12 13
13#define SCSI_NO_TAG (-1) /* identify no tag in use */ 14#define SCSI_NO_TAG (-1) /* identify no tag in use */
14 15
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1d3b5b2f0dbc..561ac99def5a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -98,6 +98,7 @@ enum transport_state_table {
98 TRANSPORT_REMOVE = 14, 98 TRANSPORT_REMOVE = 14,
99 TRANSPORT_FREE = 15, 99 TRANSPORT_FREE = 15,
100 TRANSPORT_NEW_CMD_MAP = 16, 100 TRANSPORT_NEW_CMD_MAP = 16,
101 TRANSPORT_FREE_CMD_INTR = 17,
101}; 102};
102 103
103/* Used for struct se_cmd->se_cmd_flags */ 104/* Used for struct se_cmd->se_cmd_flags */
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
index dc78f77f9450..747e1404dca0 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric_ops.h
@@ -77,7 +77,6 @@ struct target_core_fabric_ops {
77 u16 (*set_fabric_sense_len)(struct se_cmd *, u32); 77 u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
78 u16 (*get_fabric_sense_len)(void); 78 u16 (*get_fabric_sense_len)(void);
79 int (*is_state_remove)(struct se_cmd *); 79 int (*is_state_remove)(struct se_cmd *);
80 u64 (*pack_lun)(unsigned int);
81 /* 80 /*
82 * fabric module calls for target_core_fabric_configfs.c 81 * fabric module calls for target_core_fabric_configfs.c
83 */ 82 */
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 59aa464f6ee2..24a1c6cb83c3 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -172,6 +172,7 @@ extern int transport_generic_handle_cdb_map(struct se_cmd *);
172extern int transport_generic_handle_data(struct se_cmd *); 172extern int transport_generic_handle_data(struct se_cmd *);
173extern void transport_new_cmd_failure(struct se_cmd *); 173extern void transport_new_cmd_failure(struct se_cmd *);
174extern int transport_generic_handle_tmr(struct se_cmd *); 174extern int transport_generic_handle_tmr(struct se_cmd *);
175extern void transport_generic_free_cmd_intr(struct se_cmd *);
175extern void __transport_stop_task_timer(struct se_task *, unsigned long *); 176extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
176extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]); 177extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
177extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, 178extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
diff --git a/init/Kconfig b/init/Kconfig
index 332aac649966..ebafac4231ee 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -589,14 +589,6 @@ config CGROUP_DEBUG
589 589
590 Say N if unsure. 590 Say N if unsure.
591 591
592config CGROUP_NS
593 bool "Namespace cgroup subsystem"
594 help
595 Provides a simple namespace cgroup subsystem to
596 provide hierarchical naming of sets of namespaces,
597 for instance virtual servers and checkpoint/restart
598 jobs.
599
600config CGROUP_FREEZER 592config CGROUP_FREEZER
601 bool "Freezer cgroup subsystem" 593 bool "Freezer cgroup subsystem"
602 help 594 help
diff --git a/kernel/Makefile b/kernel/Makefile
index e9cf19155b46..2d64cfcc8b42 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -61,7 +61,6 @@ obj-$(CONFIG_COMPAT) += compat.o
61obj-$(CONFIG_CGROUPS) += cgroup.o 61obj-$(CONFIG_CGROUPS) += cgroup.o
62obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o 62obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o
63obj-$(CONFIG_CPUSETS) += cpuset.o 63obj-$(CONFIG_CPUSETS) += cpuset.o
64obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
65obj-$(CONFIG_UTS_NS) += utsname.o 64obj-$(CONFIG_UTS_NS) += utsname.o
66obj-$(CONFIG_USER_NS) += user_namespace.o 65obj-$(CONFIG_USER_NS) += user_namespace.o
67obj-$(CONFIG_PID_NS) += pid_namespace.o 66obj-$(CONFIG_PID_NS) += pid_namespace.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 909a35510af5..2731d115d725 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -57,6 +57,7 @@
57#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ 57#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
58#include <linux/eventfd.h> 58#include <linux/eventfd.h>
59#include <linux/poll.h> 59#include <linux/poll.h>
60#include <linux/flex_array.h> /* used in cgroup_attach_proc */
60 61
61#include <asm/atomic.h> 62#include <asm/atomic.h>
62 63
@@ -1735,6 +1736,76 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
1735} 1736}
1736EXPORT_SYMBOL_GPL(cgroup_path); 1737EXPORT_SYMBOL_GPL(cgroup_path);
1737 1738
1739/*
1740 * cgroup_task_migrate - move a task from one cgroup to another.
1741 *
1742 * 'guarantee' is set if the caller promises that a new css_set for the task
1743 * will already exist. If not set, this function might sleep, and can fail with
1744 * -ENOMEM. Otherwise, it can only fail with -ESRCH.
1745 */
1746static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
1747 struct task_struct *tsk, bool guarantee)
1748{
1749 struct css_set *oldcg;
1750 struct css_set *newcg;
1751
1752 /*
1753 * get old css_set. we need to take task_lock and refcount it, because
1754 * an exiting task can change its css_set to init_css_set and drop its
1755 * old one without taking cgroup_mutex.
1756 */
1757 task_lock(tsk);
1758 oldcg = tsk->cgroups;
1759 get_css_set(oldcg);
1760 task_unlock(tsk);
1761
1762 /* locate or allocate a new css_set for this task. */
1763 if (guarantee) {
1764 /* we know the css_set we want already exists. */
1765 struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
1766 read_lock(&css_set_lock);
1767 newcg = find_existing_css_set(oldcg, cgrp, template);
1768 BUG_ON(!newcg);
1769 get_css_set(newcg);
1770 read_unlock(&css_set_lock);
1771 } else {
1772 might_sleep();
1773 /* find_css_set will give us newcg already referenced. */
1774 newcg = find_css_set(oldcg, cgrp);
1775 if (!newcg) {
1776 put_css_set(oldcg);
1777 return -ENOMEM;
1778 }
1779 }
1780 put_css_set(oldcg);
1781
1782 /* if PF_EXITING is set, the tsk->cgroups pointer is no longer safe. */
1783 task_lock(tsk);
1784 if (tsk->flags & PF_EXITING) {
1785 task_unlock(tsk);
1786 put_css_set(newcg);
1787 return -ESRCH;
1788 }
1789 rcu_assign_pointer(tsk->cgroups, newcg);
1790 task_unlock(tsk);
1791
1792 /* Update the css_set linked lists if we're using them */
1793 write_lock(&css_set_lock);
1794 if (!list_empty(&tsk->cg_list))
1795 list_move(&tsk->cg_list, &newcg->tasks);
1796 write_unlock(&css_set_lock);
1797
1798 /*
1799 * We just gained a reference on oldcg by taking it from the task. As
1800 * trading it for newcg is protected by cgroup_mutex, we're safe to drop
1801 * it here; it will be freed under RCU.
1802 */
1803 put_css_set(oldcg);
1804
1805 set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
1806 return 0;
1807}
1808
1738/** 1809/**
1739 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp' 1810 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
1740 * @cgrp: the cgroup the task is attaching to 1811 * @cgrp: the cgroup the task is attaching to
@@ -1745,11 +1816,9 @@ EXPORT_SYMBOL_GPL(cgroup_path);
1745 */ 1816 */
1746int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) 1817int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1747{ 1818{
1748 int retval = 0; 1819 int retval;
1749 struct cgroup_subsys *ss, *failed_ss = NULL; 1820 struct cgroup_subsys *ss, *failed_ss = NULL;
1750 struct cgroup *oldcgrp; 1821 struct cgroup *oldcgrp;
1751 struct css_set *cg;
1752 struct css_set *newcg;
1753 struct cgroupfs_root *root = cgrp->root; 1822 struct cgroupfs_root *root = cgrp->root;
1754 1823
1755 /* Nothing to do if the task is already in that cgroup */ 1824 /* Nothing to do if the task is already in that cgroup */
@@ -1759,7 +1828,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1759 1828
1760 for_each_subsys(root, ss) { 1829 for_each_subsys(root, ss) {
1761 if (ss->can_attach) { 1830 if (ss->can_attach) {
1762 retval = ss->can_attach(ss, cgrp, tsk, false); 1831 retval = ss->can_attach(ss, cgrp, tsk);
1763 if (retval) { 1832 if (retval) {
1764 /* 1833 /*
1765 * Remember on which subsystem the can_attach() 1834 * Remember on which subsystem the can_attach()
@@ -1771,46 +1840,29 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1771 goto out; 1840 goto out;
1772 } 1841 }
1773 } 1842 }
1843 if (ss->can_attach_task) {
1844 retval = ss->can_attach_task(cgrp, tsk);
1845 if (retval) {
1846 failed_ss = ss;
1847 goto out;
1848 }
1849 }
1774 } 1850 }
1775 1851
1776 task_lock(tsk); 1852 retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
1777 cg = tsk->cgroups; 1853 if (retval)
1778 get_css_set(cg);
1779 task_unlock(tsk);
1780 /*
1781 * Locate or allocate a new css_set for this task,
1782 * based on its final set of cgroups
1783 */
1784 newcg = find_css_set(cg, cgrp);
1785 put_css_set(cg);
1786 if (!newcg) {
1787 retval = -ENOMEM;
1788 goto out;
1789 }
1790
1791 task_lock(tsk);
1792 if (tsk->flags & PF_EXITING) {
1793 task_unlock(tsk);
1794 put_css_set(newcg);
1795 retval = -ESRCH;
1796 goto out; 1854 goto out;
1797 }
1798 rcu_assign_pointer(tsk->cgroups, newcg);
1799 task_unlock(tsk);
1800
1801 /* Update the css_set linked lists if we're using them */
1802 write_lock(&css_set_lock);
1803 if (!list_empty(&tsk->cg_list))
1804 list_move(&tsk->cg_list, &newcg->tasks);
1805 write_unlock(&css_set_lock);
1806 1855
1807 for_each_subsys(root, ss) { 1856 for_each_subsys(root, ss) {
1857 if (ss->pre_attach)
1858 ss->pre_attach(cgrp);
1859 if (ss->attach_task)
1860 ss->attach_task(cgrp, tsk);
1808 if (ss->attach) 1861 if (ss->attach)
1809 ss->attach(ss, cgrp, oldcgrp, tsk, false); 1862 ss->attach(ss, cgrp, oldcgrp, tsk);
1810 } 1863 }
1811 set_bit(CGRP_RELEASABLE, &oldcgrp->flags); 1864
1812 synchronize_rcu(); 1865 synchronize_rcu();
1813 put_css_set(cg);
1814 1866
1815 /* 1867 /*
1816 * wake up rmdir() waiter. the rmdir should fail since the cgroup 1868 * wake up rmdir() waiter. the rmdir should fail since the cgroup
@@ -1829,7 +1881,7 @@ out:
1829 */ 1881 */
1830 break; 1882 break;
1831 if (ss->cancel_attach) 1883 if (ss->cancel_attach)
1832 ss->cancel_attach(ss, cgrp, tsk, false); 1884 ss->cancel_attach(ss, cgrp, tsk);
1833 } 1885 }
1834 } 1886 }
1835 return retval; 1887 return retval;
@@ -1860,49 +1912,370 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
1860EXPORT_SYMBOL_GPL(cgroup_attach_task_all); 1912EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
1861 1913
1862/* 1914/*
1863 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex 1915 * cgroup_attach_proc works in two stages, the first of which prefetches all
1864 * held. May take task_lock of task 1916 * new css_sets needed (to make sure we have enough memory before committing
1917 * to the move) and stores them in a list of entries of the following type.
1918 * TODO: possible optimization: use css_set->rcu_head for chaining instead
1919 */
1920struct cg_list_entry {
1921 struct css_set *cg;
1922 struct list_head links;
1923};
1924
1925static bool css_set_check_fetched(struct cgroup *cgrp,
1926 struct task_struct *tsk, struct css_set *cg,
1927 struct list_head *newcg_list)
1928{
1929 struct css_set *newcg;
1930 struct cg_list_entry *cg_entry;
1931 struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
1932
1933 read_lock(&css_set_lock);
1934 newcg = find_existing_css_set(cg, cgrp, template);
1935 if (newcg)
1936 get_css_set(newcg);
1937 read_unlock(&css_set_lock);
1938
1939 /* doesn't exist at all? */
1940 if (!newcg)
1941 return false;
1942 /* see if it's already in the list */
1943 list_for_each_entry(cg_entry, newcg_list, links) {
1944 if (cg_entry->cg == newcg) {
1945 put_css_set(newcg);
1946 return true;
1947 }
1948 }
1949
1950 /* not found */
1951 put_css_set(newcg);
1952 return false;
1953}
1954
1955/*
1956 * Find the new css_set and store it in the list in preparation for moving the
1957 * given task to the given cgroup. Returns 0 or -ENOMEM.
1958 */
1959static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
1960 struct list_head *newcg_list)
1961{
1962 struct css_set *newcg;
1963 struct cg_list_entry *cg_entry;
1964
1965 /* ensure a new css_set will exist for this thread */
1966 newcg = find_css_set(cg, cgrp);
1967 if (!newcg)
1968 return -ENOMEM;
1969 /* add it to the list */
1970 cg_entry = kmalloc(sizeof(struct cg_list_entry), GFP_KERNEL);
1971 if (!cg_entry) {
1972 put_css_set(newcg);
1973 return -ENOMEM;
1974 }
1975 cg_entry->cg = newcg;
1976 list_add(&cg_entry->links, newcg_list);
1977 return 0;
1978}
1979
1980/**
1981 * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
1982 * @cgrp: the cgroup to attach to
1983 * @leader: the threadgroup leader task_struct of the group to be attached
1984 *
1985 * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will
1986 * take task_lock of each thread in leader's threadgroup individually in turn.
1987 */
1988int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
1989{
1990 int retval, i, group_size;
1991 struct cgroup_subsys *ss, *failed_ss = NULL;
1992 bool cancel_failed_ss = false;
1993 /* guaranteed to be initialized later, but the compiler needs this */
1994 struct cgroup *oldcgrp = NULL;
1995 struct css_set *oldcg;
1996 struct cgroupfs_root *root = cgrp->root;
1997 /* threadgroup list cursor and array */
1998 struct task_struct *tsk;
1999 struct flex_array *group;
2000 /*
2001 * we need to make sure we have css_sets for all the tasks we're
2002 * going to move -before- we actually start moving them, so that in
2003 * case we get an ENOMEM we can bail out before making any changes.
2004 */
2005 struct list_head newcg_list;
2006 struct cg_list_entry *cg_entry, *temp_nobe;
2007
2008 /*
2009 * step 0: in order to do expensive, possibly blocking operations for
2010 * every thread, we cannot iterate the thread group list, since it needs
2011 * rcu or tasklist locked. instead, build an array of all threads in the
2012 * group - threadgroup_fork_lock prevents new threads from appearing,
2013 * and if threads exit, this will just be an over-estimate.
2014 */
2015 group_size = get_nr_threads(leader);
2016 /* flex_array supports very large thread-groups better than kmalloc. */
2017 group = flex_array_alloc(sizeof(struct task_struct *), group_size,
2018 GFP_KERNEL);
2019 if (!group)
2020 return -ENOMEM;
2021 /* pre-allocate to guarantee space while iterating in rcu read-side. */
2022 retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL);
2023 if (retval)
2024 goto out_free_group_list;
2025
2026 /* prevent changes to the threadgroup list while we take a snapshot. */
2027 rcu_read_lock();
2028 if (!thread_group_leader(leader)) {
2029 /*
2030 * a race with de_thread from another thread's exec() may strip
2031 * us of our leadership, making while_each_thread unsafe to use
2032 * on this task. if this happens, there is no choice but to
2033 * throw this task away and try again (from cgroup_procs_write);
2034 * this is "double-double-toil-and-trouble-check locking".
2035 */
2036 rcu_read_unlock();
2037 retval = -EAGAIN;
2038 goto out_free_group_list;
2039 }
2040 /* take a reference on each task in the group to go in the array. */
2041 tsk = leader;
2042 i = 0;
2043 do {
2044 /* as per above, nr_threads may decrease, but not increase. */
2045 BUG_ON(i >= group_size);
2046 get_task_struct(tsk);
2047 /*
2048 * saying GFP_ATOMIC has no effect here because we did prealloc
2049 * earlier, but it's good form to communicate our expectations.
2050 */
2051 retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC);
2052 BUG_ON(retval != 0);
2053 i++;
2054 } while_each_thread(leader, tsk);
2055 /* remember the number of threads in the array for later. */
2056 group_size = i;
2057 rcu_read_unlock();
2058
2059 /*
2060 * step 1: check that we can legitimately attach to the cgroup.
2061 */
2062 for_each_subsys(root, ss) {
2063 if (ss->can_attach) {
2064 retval = ss->can_attach(ss, cgrp, leader);
2065 if (retval) {
2066 failed_ss = ss;
2067 goto out_cancel_attach;
2068 }
2069 }
2070 /* a callback to be run on every thread in the threadgroup. */
2071 if (ss->can_attach_task) {
2072 /* run on each task in the threadgroup. */
2073 for (i = 0; i < group_size; i++) {
2074 tsk = flex_array_get_ptr(group, i);
2075 retval = ss->can_attach_task(cgrp, tsk);
2076 if (retval) {
2077 failed_ss = ss;
2078 cancel_failed_ss = true;
2079 goto out_cancel_attach;
2080 }
2081 }
2082 }
2083 }
2084
2085 /*
2086 * step 2: make sure css_sets exist for all threads to be migrated.
2087 * we use find_css_set, which allocates a new one if necessary.
2088 */
2089 INIT_LIST_HEAD(&newcg_list);
2090 for (i = 0; i < group_size; i++) {
2091 tsk = flex_array_get_ptr(group, i);
2092 /* nothing to do if this task is already in the cgroup */
2093 oldcgrp = task_cgroup_from_root(tsk, root);
2094 if (cgrp == oldcgrp)
2095 continue;
2096 /* get old css_set pointer */
2097 task_lock(tsk);
2098 if (tsk->flags & PF_EXITING) {
2099 /* ignore this task if it's going away */
2100 task_unlock(tsk);
2101 continue;
2102 }
2103 oldcg = tsk->cgroups;
2104 get_css_set(oldcg);
2105 task_unlock(tsk);
2106 /* see if the new one for us is already in the list? */
2107 if (css_set_check_fetched(cgrp, tsk, oldcg, &newcg_list)) {
2108 /* was already there, nothing to do. */
2109 put_css_set(oldcg);
2110 } else {
2111 /* we don't already have it. get new one. */
2112 retval = css_set_prefetch(cgrp, oldcg, &newcg_list);
2113 put_css_set(oldcg);
2114 if (retval)
2115 goto out_list_teardown;
2116 }
2117 }
2118
2119 /*
2120 * step 3: now that we're guaranteed success wrt the css_sets, proceed
2121 * to move all tasks to the new cgroup, calling ss->attach_task for each
2122 * one along the way. there are no failure cases after here, so this is
2123 * the commit point.
2124 */
2125 for_each_subsys(root, ss) {
2126 if (ss->pre_attach)
2127 ss->pre_attach(cgrp);
2128 }
2129 for (i = 0; i < group_size; i++) {
2130 tsk = flex_array_get_ptr(group, i);
2131 /* leave current thread as it is if it's already there */
2132 oldcgrp = task_cgroup_from_root(tsk, root);
2133 if (cgrp == oldcgrp)
2134 continue;
2135 /* attach each task to each subsystem */
2136 for_each_subsys(root, ss) {
2137 if (ss->attach_task)
2138 ss->attach_task(cgrp, tsk);
2139 }
2140 /* if the thread is PF_EXITING, it can just get skipped. */
2141 retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
2142 BUG_ON(retval != 0 && retval != -ESRCH);
2143 }
2144 /* nothing is sensitive to fork() after this point. */
2145
2146 /*
2147 * step 4: do expensive, non-thread-specific subsystem callbacks.
2148 * TODO: if ever a subsystem needs to know the oldcgrp for each task
2149 * being moved, this call will need to be reworked to communicate that.
2150 */
2151 for_each_subsys(root, ss) {
2152 if (ss->attach)
2153 ss->attach(ss, cgrp, oldcgrp, leader);
2154 }
2155
2156 /*
2157 * step 5: success! and cleanup
2158 */
2159 synchronize_rcu();
2160 cgroup_wakeup_rmdir_waiter(cgrp);
2161 retval = 0;
2162out_list_teardown:
2163 /* clean up the list of prefetched css_sets. */
2164 list_for_each_entry_safe(cg_entry, temp_nobe, &newcg_list, links) {
2165 list_del(&cg_entry->links);
2166 put_css_set(cg_entry->cg);
2167 kfree(cg_entry);
2168 }
2169out_cancel_attach:
2170 /* same deal as in cgroup_attach_task */
2171 if (retval) {
2172 for_each_subsys(root, ss) {
2173 if (ss == failed_ss) {
2174 if (cancel_failed_ss && ss->cancel_attach)
2175 ss->cancel_attach(ss, cgrp, leader);
2176 break;
2177 }
2178 if (ss->cancel_attach)
2179 ss->cancel_attach(ss, cgrp, leader);
2180 }
2181 }
2182 /* clean up the array of referenced threads in the group. */
2183 for (i = 0; i < group_size; i++) {
2184 tsk = flex_array_get_ptr(group, i);
2185 put_task_struct(tsk);
2186 }
2187out_free_group_list:
2188 flex_array_free(group);
2189 return retval;
2190}
2191
2192/*
2193 * Find the task_struct of the task to attach by vpid and pass it along to the
2194 * function to attach either it or all tasks in its threadgroup. Will take
2195 * cgroup_mutex; may take task_lock of task.
1865 */ 2196 */
1866static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) 2197static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
1867{ 2198{
1868 struct task_struct *tsk; 2199 struct task_struct *tsk;
1869 const struct cred *cred = current_cred(), *tcred; 2200 const struct cred *cred = current_cred(), *tcred;
1870 int ret; 2201 int ret;
1871 2202
2203 if (!cgroup_lock_live_group(cgrp))
2204 return -ENODEV;
2205
1872 if (pid) { 2206 if (pid) {
1873 rcu_read_lock(); 2207 rcu_read_lock();
1874 tsk = find_task_by_vpid(pid); 2208 tsk = find_task_by_vpid(pid);
1875 if (!tsk || tsk->flags & PF_EXITING) { 2209 if (!tsk) {
1876 rcu_read_unlock(); 2210 rcu_read_unlock();
2211 cgroup_unlock();
2212 return -ESRCH;
2213 }
2214 if (threadgroup) {
2215 /*
2216 * RCU protects this access, since tsk was found in the
2217 * tid map. a race with de_thread may cause group_leader
2218 * to stop being the leader, but cgroup_attach_proc will
2219 * detect it later.
2220 */
2221 tsk = tsk->group_leader;
2222 } else if (tsk->flags & PF_EXITING) {
2223 /* optimization for the single-task-only case */
2224 rcu_read_unlock();
2225 cgroup_unlock();
1877 return -ESRCH; 2226 return -ESRCH;
1878 } 2227 }
1879 2228
2229 /*
2230 * even if we're attaching all tasks in the thread group, we
2231 * only need to check permissions on one of them.
2232 */
1880 tcred = __task_cred(tsk); 2233 tcred = __task_cred(tsk);
1881 if (cred->euid && 2234 if (cred->euid &&
1882 cred->euid != tcred->uid && 2235 cred->euid != tcred->uid &&
1883 cred->euid != tcred->suid) { 2236 cred->euid != tcred->suid) {
1884 rcu_read_unlock(); 2237 rcu_read_unlock();
2238 cgroup_unlock();
1885 return -EACCES; 2239 return -EACCES;
1886 } 2240 }
1887 get_task_struct(tsk); 2241 get_task_struct(tsk);
1888 rcu_read_unlock(); 2242 rcu_read_unlock();
1889 } else { 2243 } else {
1890 tsk = current; 2244 if (threadgroup)
2245 tsk = current->group_leader;
2246 else
2247 tsk = current;
1891 get_task_struct(tsk); 2248 get_task_struct(tsk);
1892 } 2249 }
1893 2250
1894 ret = cgroup_attach_task(cgrp, tsk); 2251 if (threadgroup) {
2252 threadgroup_fork_write_lock(tsk);
2253 ret = cgroup_attach_proc(cgrp, tsk);
2254 threadgroup_fork_write_unlock(tsk);
2255 } else {
2256 ret = cgroup_attach_task(cgrp, tsk);
2257 }
1895 put_task_struct(tsk); 2258 put_task_struct(tsk);
2259 cgroup_unlock();
1896 return ret; 2260 return ret;
1897} 2261}
1898 2262
1899static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid) 2263static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
1900{ 2264{
2265 return attach_task_by_pid(cgrp, pid, false);
2266}
2267
2268static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
2269{
1901 int ret; 2270 int ret;
1902 if (!cgroup_lock_live_group(cgrp)) 2271 do {
1903 return -ENODEV; 2272 /*
1904 ret = attach_task_by_pid(cgrp, pid); 2273 * attach_proc fails with -EAGAIN if threadgroup leadership
1905 cgroup_unlock(); 2274 * changes in the middle of the operation, in which case we need
2275 * to find the task_struct for the new leader and start over.
2276 */
2277 ret = attach_task_by_pid(cgrp, tgid, true);
2278 } while (ret == -EAGAIN);
1906 return ret; 2279 return ret;
1907} 2280}
1908 2281
@@ -3259,9 +3632,9 @@ static struct cftype files[] = {
3259 { 3632 {
3260 .name = CGROUP_FILE_GENERIC_PREFIX "procs", 3633 .name = CGROUP_FILE_GENERIC_PREFIX "procs",
3261 .open = cgroup_procs_open, 3634 .open = cgroup_procs_open,
3262 /* .write_u64 = cgroup_procs_write, TODO */ 3635 .write_u64 = cgroup_procs_write,
3263 .release = cgroup_pidlist_release, 3636 .release = cgroup_pidlist_release,
3264 .mode = S_IRUGO, 3637 .mode = S_IRUGO | S_IWUSR,
3265 }, 3638 },
3266 { 3639 {
3267 .name = "notify_on_release", 3640 .name = "notify_on_release",
@@ -4257,122 +4630,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
4257} 4630}
4258 4631
4259/** 4632/**
4260 * cgroup_clone - clone the cgroup the given subsystem is attached to
4261 * @tsk: the task to be moved
4262 * @subsys: the given subsystem
4263 * @nodename: the name for the new cgroup
4264 *
4265 * Duplicate the current cgroup in the hierarchy that the given
4266 * subsystem is attached to, and move this task into the new
4267 * child.
4268 */
4269int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
4270 char *nodename)
4271{
4272 struct dentry *dentry;
4273 int ret = 0;
4274 struct cgroup *parent, *child;
4275 struct inode *inode;
4276 struct css_set *cg;
4277 struct cgroupfs_root *root;
4278 struct cgroup_subsys *ss;
4279
4280 /* We shouldn't be called by an unregistered subsystem */
4281 BUG_ON(!subsys->active);
4282
4283 /* First figure out what hierarchy and cgroup we're dealing
4284 * with, and pin them so we can drop cgroup_mutex */
4285 mutex_lock(&cgroup_mutex);
4286 again:
4287 root = subsys->root;
4288 if (root == &rootnode) {
4289 mutex_unlock(&cgroup_mutex);
4290 return 0;
4291 }
4292
4293 /* Pin the hierarchy */
4294 if (!atomic_inc_not_zero(&root->sb->s_active)) {
4295 /* We race with the final deactivate_super() */
4296 mutex_unlock(&cgroup_mutex);
4297 return 0;
4298 }
4299
4300 /* Keep the cgroup alive */
4301 task_lock(tsk);
4302 parent = task_cgroup(tsk, subsys->subsys_id);
4303 cg = tsk->cgroups;
4304 get_css_set(cg);
4305 task_unlock(tsk);
4306
4307 mutex_unlock(&cgroup_mutex);
4308
4309 /* Now do the VFS work to create a cgroup */
4310 inode = parent->dentry->d_inode;
4311
4312 /* Hold the parent directory mutex across this operation to
4313 * stop anyone else deleting the new cgroup */
4314 mutex_lock(&inode->i_mutex);
4315 dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
4316 if (IS_ERR(dentry)) {
4317 printk(KERN_INFO
4318 "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename,
4319 PTR_ERR(dentry));
4320 ret = PTR_ERR(dentry);
4321 goto out_release;
4322 }
4323
4324 /* Create the cgroup directory, which also creates the cgroup */
4325 ret = vfs_mkdir(inode, dentry, 0755);
4326 child = __d_cgrp(dentry);
4327 dput(dentry);
4328 if (ret) {
4329 printk(KERN_INFO
4330 "Failed to create cgroup %s: %d\n", nodename,
4331 ret);
4332 goto out_release;
4333 }
4334
4335 /* The cgroup now exists. Retake cgroup_mutex and check
4336 * that we're still in the same state that we thought we
4337 * were. */
4338 mutex_lock(&cgroup_mutex);
4339 if ((root != subsys->root) ||
4340 (parent != task_cgroup(tsk, subsys->subsys_id))) {
4341 /* Aargh, we raced ... */
4342 mutex_unlock(&inode->i_mutex);
4343 put_css_set(cg);
4344
4345 deactivate_super(root->sb);
4346 /* The cgroup is still accessible in the VFS, but
4347 * we're not going to try to rmdir() it at this
4348 * point. */
4349 printk(KERN_INFO
4350 "Race in cgroup_clone() - leaking cgroup %s\n",
4351 nodename);
4352 goto again;
4353 }
4354
4355 /* do any required auto-setup */
4356 for_each_subsys(root, ss) {
4357 if (ss->post_clone)
4358 ss->post_clone(ss, child);
4359 }
4360
4361 /* All seems fine. Finish by moving the task into the new cgroup */
4362 ret = cgroup_attach_task(child, tsk);
4363 mutex_unlock(&cgroup_mutex);
4364
4365 out_release:
4366 mutex_unlock(&inode->i_mutex);
4367
4368 mutex_lock(&cgroup_mutex);
4369 put_css_set(cg);
4370 mutex_unlock(&cgroup_mutex);
4371 deactivate_super(root->sb);
4372 return ret;
4373}
4374
4375/**
4376 * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp 4633 * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
4377 * @cgrp: the cgroup in question 4634 * @cgrp: the cgroup in question
4378 * @task: the task in question 4635 * @task: the task in question
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e7bebb7c6c38..e691818d7e45 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -160,7 +160,7 @@ static void freezer_destroy(struct cgroup_subsys *ss,
160 */ 160 */
161static int freezer_can_attach(struct cgroup_subsys *ss, 161static int freezer_can_attach(struct cgroup_subsys *ss,
162 struct cgroup *new_cgroup, 162 struct cgroup *new_cgroup,
163 struct task_struct *task, bool threadgroup) 163 struct task_struct *task)
164{ 164{
165 struct freezer *freezer; 165 struct freezer *freezer;
166 166
@@ -172,26 +172,17 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
172 if (freezer->state != CGROUP_THAWED) 172 if (freezer->state != CGROUP_THAWED)
173 return -EBUSY; 173 return -EBUSY;
174 174
175 return 0;
176}
177
178static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
179{
175 rcu_read_lock(); 180 rcu_read_lock();
176 if (__cgroup_freezing_or_frozen(task)) { 181 if (__cgroup_freezing_or_frozen(tsk)) {
177 rcu_read_unlock(); 182 rcu_read_unlock();
178 return -EBUSY; 183 return -EBUSY;
179 } 184 }
180 rcu_read_unlock(); 185 rcu_read_unlock();
181
182 if (threadgroup) {
183 struct task_struct *c;
184
185 rcu_read_lock();
186 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
187 if (__cgroup_freezing_or_frozen(c)) {
188 rcu_read_unlock();
189 return -EBUSY;
190 }
191 }
192 rcu_read_unlock();
193 }
194
195 return 0; 186 return 0;
196} 187}
197 188
@@ -390,6 +381,9 @@ struct cgroup_subsys freezer_subsys = {
390 .populate = freezer_populate, 381 .populate = freezer_populate,
391 .subsys_id = freezer_subsys_id, 382 .subsys_id = freezer_subsys_id,
392 .can_attach = freezer_can_attach, 383 .can_attach = freezer_can_attach,
384 .can_attach_task = freezer_can_attach_task,
385 .pre_attach = NULL,
386 .attach_task = NULL,
393 .attach = NULL, 387 .attach = NULL,
394 .fork = freezer_fork, 388 .fork = freezer_fork,
395 .exit = NULL, 389 .exit = NULL,
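
For orientation, a minimal subsystem sketch wired to the split callbacks used
above (can_attach_task runs once per thread, attach_task does the cheap
per-thread work, whole-group work stays in ->attach); everything named
example_* is hypothetical and registration details are omitted.

#include <linux/cgroup.h>

static int example_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	return 0;	/* per-thread permission/validity check */
}

static void example_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	/* cheap per-thread state update; group-wide work belongs in ->attach */
}

static struct cgroup_subsys example_subsys = {
	.name		 = "example",
	.can_attach_task = example_can_attach_task,
	.pre_attach	 = NULL,
	.attach_task	 = example_attach_task,
	.attach		 = NULL,
};
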
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2bb8c2e98fff..1ceeb049c827 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1367,14 +1367,10 @@ static int fmeter_getrate(struct fmeter *fmp)
1367 return val; 1367 return val;
1368} 1368}
1369 1369
1370/* Protected by cgroup_lock */
1371static cpumask_var_t cpus_attach;
1372
1373/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */ 1370/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
1374static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, 1371static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1375 struct task_struct *tsk, bool threadgroup) 1372 struct task_struct *tsk)
1376{ 1373{
1377 int ret;
1378 struct cpuset *cs = cgroup_cs(cont); 1374 struct cpuset *cs = cgroup_cs(cont);
1379 1375
1380 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) 1376 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
@@ -1391,29 +1387,42 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1391 if (tsk->flags & PF_THREAD_BOUND) 1387 if (tsk->flags & PF_THREAD_BOUND)
1392 return -EINVAL; 1388 return -EINVAL;
1393 1389
1394 ret = security_task_setscheduler(tsk);
1395 if (ret)
1396 return ret;
1397 if (threadgroup) {
1398 struct task_struct *c;
1399
1400 rcu_read_lock();
1401 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
1402 ret = security_task_setscheduler(c);
1403 if (ret) {
1404 rcu_read_unlock();
1405 return ret;
1406 }
1407 }
1408 rcu_read_unlock();
1409 }
1410 return 0; 1390 return 0;
1411} 1391}
1412 1392
1413static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to, 1393static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
1414 struct cpuset *cs) 1394{
1395 return security_task_setscheduler(task);
1396}
1397
1398/*
1399 * Protected by cgroup_lock. The nodemasks must be stored globally because
1400 * dynamically allocating them is not allowed in pre_attach, and they must
1401 * persist among pre_attach, attach_task, and attach.
1402 */
1403static cpumask_var_t cpus_attach;
1404static nodemask_t cpuset_attach_nodemask_from;
1405static nodemask_t cpuset_attach_nodemask_to;
1406
1407/* Set-up work for before attaching each task. */
1408static void cpuset_pre_attach(struct cgroup *cont)
1409{
1410 struct cpuset *cs = cgroup_cs(cont);
1411
1412 if (cs == &top_cpuset)
1413 cpumask_copy(cpus_attach, cpu_possible_mask);
1414 else
1415 guarantee_online_cpus(cs, cpus_attach);
1416
1417 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1418}
1419
1420/* Per-thread attachment work. */
1421static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
1415{ 1422{
1416 int err; 1423 int err;
1424 struct cpuset *cs = cgroup_cs(cont);
1425
1417 /* 1426 /*
1418 * can_attach beforehand should guarantee that this doesn't fail. 1427 * can_attach beforehand should guarantee that this doesn't fail.
1419 * TODO: have a better way to handle failure here 1428 * TODO: have a better way to handle failure here
@@ -1421,45 +1430,29 @@ static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to,
1421 err = set_cpus_allowed_ptr(tsk, cpus_attach); 1430 err = set_cpus_allowed_ptr(tsk, cpus_attach);
1422 WARN_ON_ONCE(err); 1431 WARN_ON_ONCE(err);
1423 1432
1424 cpuset_change_task_nodemask(tsk, to); 1433 cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to);
1425 cpuset_update_task_spread_flag(cs, tsk); 1434 cpuset_update_task_spread_flag(cs, tsk);
1426
1427} 1435}
1428 1436
1429static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont, 1437static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1430 struct cgroup *oldcont, struct task_struct *tsk, 1438 struct cgroup *oldcont, struct task_struct *tsk)
1431 bool threadgroup)
1432{ 1439{
1433 struct mm_struct *mm; 1440 struct mm_struct *mm;
1434 struct cpuset *cs = cgroup_cs(cont); 1441 struct cpuset *cs = cgroup_cs(cont);
1435 struct cpuset *oldcs = cgroup_cs(oldcont); 1442 struct cpuset *oldcs = cgroup_cs(oldcont);
1436 static nodemask_t to; /* protected by cgroup_mutex */
1437 1443
1438 if (cs == &top_cpuset) { 1444 /*
1439 cpumask_copy(cpus_attach, cpu_possible_mask); 1445 * Change mm, possibly for multiple threads in a threadgroup. This is
1440 } else { 1446 * expensive and may sleep.
1441 guarantee_online_cpus(cs, cpus_attach); 1447 */
1442 } 1448 cpuset_attach_nodemask_from = oldcs->mems_allowed;
1443 guarantee_online_mems(cs, &to); 1449 cpuset_attach_nodemask_to = cs->mems_allowed;
1444
1445 /* do per-task migration stuff possibly for each in the threadgroup */
1446 cpuset_attach_task(tsk, &to, cs);
1447 if (threadgroup) {
1448 struct task_struct *c;
1449 rcu_read_lock();
1450 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
1451 cpuset_attach_task(c, &to, cs);
1452 }
1453 rcu_read_unlock();
1454 }
1455
1456 /* change mm; only needs to be done once even if threadgroup */
1457 to = cs->mems_allowed;
1458 mm = get_task_mm(tsk); 1450 mm = get_task_mm(tsk);
1459 if (mm) { 1451 if (mm) {
1460 mpol_rebind_mm(mm, &to); 1452 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1461 if (is_memory_migrate(cs)) 1453 if (is_memory_migrate(cs))
1462 cpuset_migrate_mm(mm, &oldcs->mems_allowed, &to); 1454 cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
1455 &cpuset_attach_nodemask_to);
1463 mmput(mm); 1456 mmput(mm);
1464 } 1457 }
1465} 1458}
@@ -1809,10 +1802,9 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
1809} 1802}
1810 1803
1811/* 1804/*
1812 * post_clone() is called at the end of cgroup_clone(). 1805 * post_clone() is called during cgroup_create() when the
1813 * 'cgroup' was just created automatically as a result of 1806 * clone_children mount argument was specified. The cgroup
1814 * a cgroup_clone(), and the current task is about to 1807 * can not yet have any tasks.
1815 * be moved into 'cgroup'.
1816 * 1808 *
1817 * Currently we refuse to set up the cgroup - thereby 1809 * Currently we refuse to set up the cgroup - thereby
1818 * refusing the task to be entered, and as a result refusing 1810 * refusing the task to be entered, and as a result refusing
@@ -1911,6 +1903,9 @@ struct cgroup_subsys cpuset_subsys = {
1911 .create = cpuset_create, 1903 .create = cpuset_create,
1912 .destroy = cpuset_destroy, 1904 .destroy = cpuset_destroy,
1913 .can_attach = cpuset_can_attach, 1905 .can_attach = cpuset_can_attach,
1906 .can_attach_task = cpuset_can_attach_task,
1907 .pre_attach = cpuset_pre_attach,
1908 .attach_task = cpuset_attach_task,
1914 .attach = cpuset_attach, 1909 .attach = cpuset_attach,
1915 .populate = cpuset_populate, 1910 .populate = cpuset_populate,
1916 .post_clone = cpuset_post_clone, 1911 .post_clone = cpuset_post_clone,
diff --git a/kernel/cred.c b/kernel/cred.c
index e12c8af793f8..174fa84eca30 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -1,4 +1,4 @@
1/* Task credentials management - see Documentation/credentials.txt 1/* Task credentials management - see Documentation/security/credentials.txt
2 * 2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
diff --git a/kernel/fork.c b/kernel/fork.c
index 8e7e135d0817..ca406d916713 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -59,7 +59,6 @@
59#include <linux/taskstats_kern.h> 59#include <linux/taskstats_kern.h>
60#include <linux/random.h> 60#include <linux/random.h>
61#include <linux/tty.h> 61#include <linux/tty.h>
62#include <linux/proc_fs.h>
63#include <linux/blkdev.h> 62#include <linux/blkdev.h>
64#include <linux/fs_struct.h> 63#include <linux/fs_struct.h>
65#include <linux/magic.h> 64#include <linux/magic.h>
@@ -597,6 +596,57 @@ void mmput(struct mm_struct *mm)
597} 596}
598EXPORT_SYMBOL_GPL(mmput); 597EXPORT_SYMBOL_GPL(mmput);
599 598
599/*
600 * We added or removed a vma mapping the executable. The vmas are only mapped
601 * during exec and are not mapped with the mmap system call.
602 * Callers must hold down_write() on the mm's mmap_sem for these
603 */
604void added_exe_file_vma(struct mm_struct *mm)
605{
606 mm->num_exe_file_vmas++;
607}
608
609void removed_exe_file_vma(struct mm_struct *mm)
610{
611 mm->num_exe_file_vmas--;
612 if ((mm->num_exe_file_vmas == 0) && mm->exe_file){
613 fput(mm->exe_file);
614 mm->exe_file = NULL;
615 }
616
617}
618
619void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
620{
621 if (new_exe_file)
622 get_file(new_exe_file);
623 if (mm->exe_file)
624 fput(mm->exe_file);
625 mm->exe_file = new_exe_file;
626 mm->num_exe_file_vmas = 0;
627}
628
629struct file *get_mm_exe_file(struct mm_struct *mm)
630{
631 struct file *exe_file;
632
633 /* We need mmap_sem to protect against races with removal of
634 * VM_EXECUTABLE vmas */
635 down_read(&mm->mmap_sem);
636 exe_file = mm->exe_file;
637 if (exe_file)
638 get_file(exe_file);
639 up_read(&mm->mmap_sem);
640 return exe_file;
641}
642
643static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
644{
645 /* It's safe to write the exe_file pointer without exe_file_lock because
646 * this is called during fork when the task is not yet in /proc */
647 newmm->exe_file = get_mm_exe_file(oldmm);
648}
649
600/** 650/**
601 * get_task_mm - acquire a reference to the task's mm 651 * get_task_mm - acquire a reference to the task's mm
602 * 652 *
@@ -957,6 +1007,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
957 tty_audit_fork(sig); 1007 tty_audit_fork(sig);
958 sched_autogroup_fork(sig); 1008 sched_autogroup_fork(sig);
959 1009
1010#ifdef CONFIG_CGROUPS
1011 init_rwsem(&sig->threadgroup_fork_lock);
1012#endif
1013
960 sig->oom_adj = current->signal->oom_adj; 1014 sig->oom_adj = current->signal->oom_adj;
961 sig->oom_score_adj = current->signal->oom_score_adj; 1015 sig->oom_score_adj = current->signal->oom_score_adj;
962 sig->oom_score_adj_min = current->signal->oom_score_adj_min; 1016 sig->oom_score_adj_min = current->signal->oom_score_adj_min;
@@ -1138,6 +1192,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1138 monotonic_to_bootbased(&p->real_start_time); 1192 monotonic_to_bootbased(&p->real_start_time);
1139 p->io_context = NULL; 1193 p->io_context = NULL;
1140 p->audit_context = NULL; 1194 p->audit_context = NULL;
1195 if (clone_flags & CLONE_THREAD)
1196 threadgroup_fork_read_lock(current);
1141 cgroup_fork(p); 1197 cgroup_fork(p);
1142#ifdef CONFIG_NUMA 1198#ifdef CONFIG_NUMA
1143 p->mempolicy = mpol_dup(p->mempolicy); 1199 p->mempolicy = mpol_dup(p->mempolicy);
@@ -1223,12 +1279,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1223 if (clone_flags & CLONE_THREAD) 1279 if (clone_flags & CLONE_THREAD)
1224 p->tgid = current->tgid; 1280 p->tgid = current->tgid;
1225 1281
1226 if (current->nsproxy != p->nsproxy) {
1227 retval = ns_cgroup_clone(p, pid);
1228 if (retval)
1229 goto bad_fork_free_pid;
1230 }
1231
1232 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; 1282 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1233 /* 1283 /*
1234 * Clear TID on mm_release()? 1284 * Clear TID on mm_release()?
@@ -1342,6 +1392,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1342 write_unlock_irq(&tasklist_lock); 1392 write_unlock_irq(&tasklist_lock);
1343 proc_fork_connector(p); 1393 proc_fork_connector(p);
1344 cgroup_post_fork(p); 1394 cgroup_post_fork(p);
1395 if (clone_flags & CLONE_THREAD)
1396 threadgroup_fork_read_unlock(current);
1345 perf_event_fork(p); 1397 perf_event_fork(p);
1346 return p; 1398 return p;
1347 1399
@@ -1380,6 +1432,8 @@ bad_fork_cleanup_policy:
1380 mpol_put(p->mempolicy); 1432 mpol_put(p->mempolicy);
1381bad_fork_cleanup_cgroup: 1433bad_fork_cleanup_cgroup:
1382#endif 1434#endif
1435 if (clone_flags & CLONE_THREAD)
1436 threadgroup_fork_read_unlock(current);
1383 cgroup_exit(p, cgroup_callbacks_done); 1437 cgroup_exit(p, cgroup_callbacks_done);
1384 delayacct_tsk_free(p); 1438 delayacct_tsk_free(p);
1385 module_put(task_thread_info(p)->exec_domain->module); 1439 module_put(task_thread_info(p)->exec_domain->module);
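
The CLONE_THREAD hunks above wrap the fork path in a read lock on the new sig->threadgroup_fork_lock. The presumed write-side counterpart (a sketch under the assumption that the lock exists so a whole-threadgroup cgroup attach can exclude concurrent thread creation; these helper names are hypothetical) would simply take the rwsem for writing:

static inline void threadgroup_fork_write_lock_sketch(struct task_struct *tsk)
{
	/* block new CLONE_THREAD forks into tsk's group while it is being attached */
	down_write(&tsk->signal->threadgroup_fork_lock);
}

static inline void threadgroup_fork_write_unlock_sketch(struct task_struct *tsk)
{
	up_write(&tsk->signal->threadgroup_fork_lock);
}
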
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
deleted file mode 100644
index 2c98ad94ba0e..000000000000
--- a/kernel/ns_cgroup.c
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * ns_cgroup.c - namespace cgroup subsystem
3 *
4 * Copyright 2006, 2007 IBM Corp
5 */
6
7#include <linux/module.h>
8#include <linux/cgroup.h>
9#include <linux/fs.h>
10#include <linux/proc_fs.h>
11#include <linux/slab.h>
12#include <linux/nsproxy.h>
13
14struct ns_cgroup {
15 struct cgroup_subsys_state css;
16};
17
18struct cgroup_subsys ns_subsys;
19
20static inline struct ns_cgroup *cgroup_to_ns(
21 struct cgroup *cgroup)
22{
23 return container_of(cgroup_subsys_state(cgroup, ns_subsys_id),
24 struct ns_cgroup, css);
25}
26
27int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
28{
29 char name[PROC_NUMBUF];
30
31 snprintf(name, PROC_NUMBUF, "%d", pid_vnr(pid));
32 return cgroup_clone(task, &ns_subsys, name);
33}
34
35/*
36 * Rules:
37 * 1. you can only enter a cgroup which is a descendant of your current
38 * cgroup
39 * 2. you can only place another process into a cgroup if
40 * a. you have CAP_SYS_ADMIN
41 * b. your cgroup is an ancestor of task's destination cgroup
42 * (hence either you are in the same cgroup as task, or in an
43 * ancestor cgroup thereof)
44 */
45static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup,
46 struct task_struct *task, bool threadgroup)
47{
48 if (current != task) {
49 if (!capable(CAP_SYS_ADMIN))
50 return -EPERM;
51
52 if (!cgroup_is_descendant(new_cgroup, current))
53 return -EPERM;
54 }
55
56 if (!cgroup_is_descendant(new_cgroup, task))
57 return -EPERM;
58
59 if (threadgroup) {
60 struct task_struct *c;
61 rcu_read_lock();
62 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
63 if (!cgroup_is_descendant(new_cgroup, c)) {
64 rcu_read_unlock();
65 return -EPERM;
66 }
67 }
68 rcu_read_unlock();
69 }
70
71 return 0;
72}
73
74/*
75 * Rules: you can only create a cgroup if
76 * 1. you are capable(CAP_SYS_ADMIN)
77 * 2. the target cgroup is a descendant of your own cgroup
78 */
79static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
80 struct cgroup *cgroup)
81{
82 struct ns_cgroup *ns_cgroup;
83
84 if (!capable(CAP_SYS_ADMIN))
85 return ERR_PTR(-EPERM);
86 if (!cgroup_is_descendant(cgroup, current))
87 return ERR_PTR(-EPERM);
88 if (test_bit(CGRP_CLONE_CHILDREN, &cgroup->flags)) {
89 printk("ns_cgroup can't be created with parent "
90 "'clone_children' set.\n");
91 return ERR_PTR(-EINVAL);
92 }
93
94 printk_once("ns_cgroup deprecated: consider using the "
95 "'clone_children' flag without the ns_cgroup.\n");
96
97 ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
98 if (!ns_cgroup)
99 return ERR_PTR(-ENOMEM);
100 return &ns_cgroup->css;
101}
102
103static void ns_destroy(struct cgroup_subsys *ss,
104 struct cgroup *cgroup)
105{
106 struct ns_cgroup *ns_cgroup;
107
108 ns_cgroup = cgroup_to_ns(cgroup);
109 kfree(ns_cgroup);
110}
111
112struct cgroup_subsys ns_subsys = {
113 .name = "ns",
114 .can_attach = ns_can_attach,
115 .create = ns_create,
116 .destroy = ns_destroy,
117 .subsys_id = ns_subsys_id,
118};
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 5424e37673ed..d6a00f3de15d 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -201,10 +201,6 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
201 goto out; 201 goto out;
202 } 202 }
203 203
204 err = ns_cgroup_clone(current, task_pid(current));
205 if (err)
206 put_nsproxy(*new_nsp);
207
208out: 204out:
209 return err; 205 return err;
210} 206}
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index beb184689af9..fd8d1e035df9 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -40,6 +40,7 @@
40#include <linux/string.h> 40#include <linux/string.h>
41#include <linux/platform_device.h> 41#include <linux/platform_device.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/kernel.h>
43 44
44#include <linux/uaccess.h> 45#include <linux/uaccess.h>
45 46
@@ -404,24 +405,36 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
404 size_t count, loff_t *f_pos) 405 size_t count, loff_t *f_pos)
405{ 406{
406 s32 value; 407 s32 value;
407 int x;
408 char ascii_value[11];
409 struct pm_qos_request_list *pm_qos_req; 408 struct pm_qos_request_list *pm_qos_req;
410 409
411 if (count == sizeof(s32)) { 410 if (count == sizeof(s32)) {
412 if (copy_from_user(&value, buf, sizeof(s32))) 411 if (copy_from_user(&value, buf, sizeof(s32)))
413 return -EFAULT; 412 return -EFAULT;
414 } else if (count == 11) { /* len('0x12345678/0') */ 413 } else if (count <= 11) { /* ASCII perhaps? */
415 if (copy_from_user(ascii_value, buf, 11)) 414 char ascii_value[11];
415 unsigned long int ulval;
416 int ret;
417
418 if (copy_from_user(ascii_value, buf, count))
416 return -EFAULT; 419 return -EFAULT;
417 if (strlen(ascii_value) != 10) 420
418 return -EINVAL; 421 if (count > 10) {
419 x = sscanf(ascii_value, "%x", &value); 422 if (ascii_value[10] == '\n')
420 if (x != 1) 423 ascii_value[10] = '\0';
424 else
425 return -EINVAL;
426 } else {
427 ascii_value[count] = '\0';
428 }
429 ret = strict_strtoul(ascii_value, 16, &ulval);
430 if (ret) {
431 pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
421 return -EINVAL; 432 return -EINVAL;
422 pr_debug("%s, %d, 0x%x\n", ascii_value, x, value); 433 }
423 } else 434 value = (s32)lower_32_bits(ulval);
435 } else {
424 return -EINVAL; 436 return -EINVAL;
437 }
425 438
426 pm_qos_req = filp->private_data; 439 pm_qos_req = filp->private_data;
427 pm_qos_update_request(pm_qos_req, value); 440 pm_qos_update_request(pm_qos_req, value);
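
With the rewritten parser, pm_qos_power_write() accepts either a raw 4-byte s32 or up to 11 bytes of ASCII hex, strips a trailing newline, and truncates the strict_strtoul() result with lower_32_bits(). A minimal user-space sketch (the device path is an example of the misc devices registered elsewhere in this file; adjust to the node actually present):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/cpu_dma_latency", O_WRONLY);

	if (fd < 0)
		return 1;
	/* short ASCII write, now accepted since any count <= 11 is parsed as hex */
	write(fd, "32\n", 3);		/* requests the value 0x32 */
	/* the request normally stays in force until the fd is closed */
	close(fd);
	return 0;
}
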
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index f9bec56d8825..8f7b1db1ece1 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -25,7 +25,6 @@
25#include <linux/gfp.h> 25#include <linux/gfp.h>
26#include <linux/syscore_ops.h> 26#include <linux/syscore_ops.h>
27#include <scsi/scsi_scan.h> 27#include <scsi/scsi_scan.h>
28#include <asm/suspend.h>
29 28
30#include "power.h" 29#include "power.h"
31 30
@@ -55,10 +54,9 @@ static int hibernation_mode = HIBERNATION_SHUTDOWN;
55static const struct platform_hibernation_ops *hibernation_ops; 54static const struct platform_hibernation_ops *hibernation_ops;
56 55
57/** 56/**
58 * hibernation_set_ops - set the global hibernate operations 57 * hibernation_set_ops - Set the global hibernate operations.
59 * @ops: the hibernation operations to use in subsequent hibernation transitions 58 * @ops: Hibernation operations to use in subsequent hibernation transitions.
60 */ 59 */
61
62void hibernation_set_ops(const struct platform_hibernation_ops *ops) 60void hibernation_set_ops(const struct platform_hibernation_ops *ops)
63{ 61{
64 if (ops && !(ops->begin && ops->end && ops->pre_snapshot 62 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
@@ -115,10 +113,9 @@ static int hibernation_test(int level) { return 0; }
115#endif /* !CONFIG_PM_DEBUG */ 113#endif /* !CONFIG_PM_DEBUG */
116 114
117/** 115/**
118 * platform_begin - tell the platform driver that we're starting 116 * platform_begin - Call platform to start hibernation.
119 * hibernation 117 * @platform_mode: Whether or not to use the platform driver.
120 */ 118 */
121
122static int platform_begin(int platform_mode) 119static int platform_begin(int platform_mode)
123{ 120{
124 return (platform_mode && hibernation_ops) ? 121 return (platform_mode && hibernation_ops) ?
@@ -126,10 +123,9 @@ static int platform_begin(int platform_mode)
126} 123}
127 124
128/** 125/**
129 * platform_end - tell the platform driver that we've entered the 126 * platform_end - Call platform to finish transition to the working state.
130 * working state 127 * @platform_mode: Whether or not to use the platform driver.
131 */ 128 */
132
133static void platform_end(int platform_mode) 129static void platform_end(int platform_mode)
134{ 130{
135 if (platform_mode && hibernation_ops) 131 if (platform_mode && hibernation_ops)
@@ -137,8 +133,11 @@ static void platform_end(int platform_mode)
137} 133}
138 134
139/** 135/**
140 * platform_pre_snapshot - prepare the machine for hibernation using the 136 * platform_pre_snapshot - Call platform to prepare the machine for hibernation.
141 * platform driver if so configured and return an error code if it fails 137 * @platform_mode: Whether or not to use the platform driver.
138 *
139 * Use the platform driver to prepare the system for creating a hibernate image,
140 * if so configured, and return an error code if that fails.
142 */ 141 */
143 142
144static int platform_pre_snapshot(int platform_mode) 143static int platform_pre_snapshot(int platform_mode)
@@ -148,10 +147,14 @@ static int platform_pre_snapshot(int platform_mode)
148} 147}
149 148
150/** 149/**
151 * platform_leave - prepare the machine for switching to the normal mode 150 * platform_leave - Call platform to prepare a transition to the working state.
152 * of operation using the platform driver (called with interrupts disabled) 151 * @platform_mode: Whether or not to use the platform driver.
152 *
 153 * Use the platform driver to prepare the machine for switching to the
154 * normal mode of operation.
155 *
156 * This routine is called on one CPU with interrupts disabled.
153 */ 157 */
154
155static void platform_leave(int platform_mode) 158static void platform_leave(int platform_mode)
156{ 159{
157 if (platform_mode && hibernation_ops) 160 if (platform_mode && hibernation_ops)
@@ -159,10 +162,14 @@ static void platform_leave(int platform_mode)
159} 162}
160 163
161/** 164/**
162 * platform_finish - switch the machine to the normal mode of operation 165 * platform_finish - Call platform to switch the system to the working state.
163 * using the platform driver (must be called after platform_prepare()) 166 * @platform_mode: Whether or not to use the platform driver.
167 *
168 * Use the platform driver to switch the machine to the normal mode of
169 * operation.
170 *
171 * This routine must be called after platform_prepare().
164 */ 172 */
165
166static void platform_finish(int platform_mode) 173static void platform_finish(int platform_mode)
167{ 174{
168 if (platform_mode && hibernation_ops) 175 if (platform_mode && hibernation_ops)
@@ -170,11 +177,15 @@ static void platform_finish(int platform_mode)
170} 177}
171 178
172/** 179/**
173 * platform_pre_restore - prepare the platform for the restoration from a 180 * platform_pre_restore - Prepare for hibernate image restoration.
174 * hibernation image. If the restore fails after this function has been 181 * @platform_mode: Whether or not to use the platform driver.
175 * called, platform_restore_cleanup() must be called. 182 *
183 * Use the platform driver to prepare the system for resume from a hibernation
184 * image.
185 *
186 * If the restore fails after this function has been called,
187 * platform_restore_cleanup() must be called.
176 */ 188 */
177
178static int platform_pre_restore(int platform_mode) 189static int platform_pre_restore(int platform_mode)
179{ 190{
180 return (platform_mode && hibernation_ops) ? 191 return (platform_mode && hibernation_ops) ?
@@ -182,12 +193,16 @@ static int platform_pre_restore(int platform_mode)
182} 193}
183 194
184/** 195/**
185 * platform_restore_cleanup - switch the platform to the normal mode of 196 * platform_restore_cleanup - Switch to the working state after failing restore.
186 * operation after a failing restore. If platform_pre_restore() has been 197 * @platform_mode: Whether or not to use the platform driver.
187 * called before the failing restore, this function must be called too, 198 *
188 * regardless of the result of platform_pre_restore(). 199 * Use the platform driver to switch the system to the normal mode of operation
200 * after a failing restore.
201 *
202 * If platform_pre_restore() has been called before the failing restore, this
203 * function must be called too, regardless of the result of
204 * platform_pre_restore().
189 */ 205 */
190
191static void platform_restore_cleanup(int platform_mode) 206static void platform_restore_cleanup(int platform_mode)
192{ 207{
193 if (platform_mode && hibernation_ops) 208 if (platform_mode && hibernation_ops)
@@ -195,10 +210,9 @@ static void platform_restore_cleanup(int platform_mode)
195} 210}
196 211
197/** 212/**
198 * platform_recover - recover the platform from a failure to suspend 213 * platform_recover - Recover from a failure to suspend devices.
199 * devices. 214 * @platform_mode: Whether or not to use the platform driver.
200 */ 215 */
201
202static void platform_recover(int platform_mode) 216static void platform_recover(int platform_mode)
203{ 217{
204 if (platform_mode && hibernation_ops && hibernation_ops->recover) 218 if (platform_mode && hibernation_ops && hibernation_ops->recover)
@@ -206,13 +220,12 @@ static void platform_recover(int platform_mode)
206} 220}
207 221
208/** 222/**
209 * swsusp_show_speed - print the time elapsed between two events. 223 * swsusp_show_speed - Print time elapsed between two events during hibernation.
210 * @start: Starting event. 224 * @start: Starting event.
211 * @stop: Final event. 225 * @stop: Final event.
212 * @nr_pages - number of pages processed between @start and @stop 226 * @nr_pages: Number of memory pages processed between @start and @stop.
213 * @msg - introductory message to print 227 * @msg: Additional diagnostic message to print.
214 */ 228 */
215
216void swsusp_show_speed(struct timeval *start, struct timeval *stop, 229void swsusp_show_speed(struct timeval *start, struct timeval *stop,
217 unsigned nr_pages, char *msg) 230 unsigned nr_pages, char *msg)
218{ 231{
@@ -235,25 +248,18 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
235} 248}
236 249
237/** 250/**
238 * create_image - freeze devices that need to be frozen with interrupts 251 * create_image - Create a hibernation image.
239 * off, create the hibernation image and thaw those devices. Control 252 * @platform_mode: Whether or not to use the platform driver.
240 * reappears in this routine after a restore. 253 *
254 * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image
255 * and execute the drivers' .thaw_noirq() callbacks.
256 *
257 * Control reappears in this routine after the subsequent restore.
241 */ 258 */
242
243static int create_image(int platform_mode) 259static int create_image(int platform_mode)
244{ 260{
245 int error; 261 int error;
246 262
247 error = arch_prepare_suspend();
248 if (error)
249 return error;
250
251 /* At this point, dpm_suspend_start() has been called, but *not*
252 * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now.
253 * Otherwise, drivers for some devices (e.g. interrupt controllers)
254 * become desynchronized with the actual state of the hardware
255 * at resume time, and evil weirdness ensues.
256 */
257 error = dpm_suspend_noirq(PMSG_FREEZE); 263 error = dpm_suspend_noirq(PMSG_FREEZE);
258 if (error) { 264 if (error) {
259 printk(KERN_ERR "PM: Some devices failed to power down, " 265 printk(KERN_ERR "PM: Some devices failed to power down, "
@@ -297,9 +303,6 @@ static int create_image(int platform_mode)
297 303
298 Power_up: 304 Power_up:
299 syscore_resume(); 305 syscore_resume();
300 /* NOTE: dpm_resume_noirq() is just a resume() for devices
301 * that suspended with irqs off ... no overall powerup.
302 */
303 306
304 Enable_irqs: 307 Enable_irqs:
305 local_irq_enable(); 308 local_irq_enable();
@@ -317,14 +320,11 @@ static int create_image(int platform_mode)
317} 320}
318 321
319/** 322/**
320 * hibernation_snapshot - quiesce devices and create the hibernation 323 * hibernation_snapshot - Quiesce devices and create a hibernation image.
321 * snapshot image. 324 * @platform_mode: If set, use platform driver to prepare for the transition.
322 * @platform_mode - if set, use the platform driver, if available, to
323 * prepare the platform firmware for the power transition.
324 * 325 *
325 * Must be called with pm_mutex held 326 * This routine must be called with pm_mutex held.
326 */ 327 */
327
328int hibernation_snapshot(int platform_mode) 328int hibernation_snapshot(int platform_mode)
329{ 329{
330 pm_message_t msg = PMSG_RECOVER; 330 pm_message_t msg = PMSG_RECOVER;
@@ -384,13 +384,14 @@ int hibernation_snapshot(int platform_mode)
384} 384}
385 385
386/** 386/**
387 * resume_target_kernel - prepare devices that need to be suspended with 387 * resume_target_kernel - Restore system state from a hibernation image.
388 * interrupts off, restore the contents of highmem that have not been 388 * @platform_mode: Whether or not to use the platform driver.
389 * restored yet from the image and run the low level code that will restore 389 *
390 * the remaining contents of memory and switch to the just restored target 390 * Execute device drivers' .freeze_noirq() callbacks, restore the contents of
391 * kernel. 391 * highmem that have not been restored yet from the image and run the low-level
392 * code that will restore the remaining contents of memory and switch to the
393 * just restored target kernel.
392 */ 394 */
393
394static int resume_target_kernel(bool platform_mode) 395static int resume_target_kernel(bool platform_mode)
395{ 396{
396 int error; 397 int error;
@@ -416,24 +417,26 @@ static int resume_target_kernel(bool platform_mode)
416 if (error) 417 if (error)
417 goto Enable_irqs; 418 goto Enable_irqs;
418 419
419 /* We'll ignore saved state, but this gets preempt count (etc) right */
420 save_processor_state(); 420 save_processor_state();
421 error = restore_highmem(); 421 error = restore_highmem();
422 if (!error) { 422 if (!error) {
423 error = swsusp_arch_resume(); 423 error = swsusp_arch_resume();
424 /* 424 /*
425 * The code below is only ever reached in case of a failure. 425 * The code below is only ever reached in case of a failure.
426 * Otherwise execution continues at place where 426 * Otherwise, execution continues at the place where
427 * swsusp_arch_suspend() was called 427 * swsusp_arch_suspend() was called.
428 */ 428 */
429 BUG_ON(!error); 429 BUG_ON(!error);
430 /* This call to restore_highmem() undos the previous one */ 430 /*
431 * This call to restore_highmem() reverts the changes made by
432 * the previous one.
433 */
431 restore_highmem(); 434 restore_highmem();
432 } 435 }
433 /* 436 /*
434 * The only reason why swsusp_arch_resume() can fail is memory being 437 * The only reason why swsusp_arch_resume() can fail is memory being
435 * very tight, so we have to free it as soon as we can to avoid 438 * very tight, so we have to free it as soon as we can to avoid
436 * subsequent failures 439 * subsequent failures.
437 */ 440 */
438 swsusp_free(); 441 swsusp_free();
439 restore_processor_state(); 442 restore_processor_state();
@@ -456,14 +459,12 @@ static int resume_target_kernel(bool platform_mode)
456} 459}
457 460
458/** 461/**
459 * hibernation_restore - quiesce devices and restore the hibernation 462 * hibernation_restore - Quiesce devices and restore from a hibernation image.
460 * snapshot image. If successful, control returns in hibernation_snaphot() 463 * @platform_mode: If set, use platform driver to prepare for the transition.
461 * @platform_mode - if set, use the platform driver, if available, to
462 * prepare the platform firmware for the transition.
463 * 464 *
464 * Must be called with pm_mutex held 465 * This routine must be called with pm_mutex held. If it is successful, control
 466 * reappears in the restored target kernel in hibernation_snapshot().
465 */ 467 */
466
467int hibernation_restore(int platform_mode) 468int hibernation_restore(int platform_mode)
468{ 469{
469 int error; 470 int error;
@@ -483,10 +484,8 @@ int hibernation_restore(int platform_mode)
483} 484}
484 485
485/** 486/**
486 * hibernation_platform_enter - enter the hibernation state using the 487 * hibernation_platform_enter - Power off the system using the platform driver.
487 * platform driver (if available)
488 */ 488 */
489
490int hibernation_platform_enter(void) 489int hibernation_platform_enter(void)
491{ 490{
492 int error; 491 int error;
@@ -557,12 +556,12 @@ int hibernation_platform_enter(void)
557} 556}
558 557
559/** 558/**
560 * power_down - Shut the machine down for hibernation. 559 * power_down - Shut the machine down for hibernation.
561 * 560 *
562 * Use the platform driver, if configured so; otherwise try 561 * Use the platform driver, if configured, to put the system into the sleep
563 * to power off or reboot. 562 * state corresponding to hibernation, or try to power it off or reboot,
563 * depending on the value of hibernation_mode.
564 */ 564 */
565
566static void power_down(void) 565static void power_down(void)
567{ 566{
568 switch (hibernation_mode) { 567 switch (hibernation_mode) {
@@ -599,9 +598,8 @@ static int prepare_processes(void)
599} 598}
600 599
601/** 600/**
602 * hibernate - The granpappy of the built-in hibernation management 601 * hibernate - Carry out system hibernation, including saving the image.
603 */ 602 */
604
605int hibernate(void) 603int hibernate(void)
606{ 604{
607 int error; 605 int error;
@@ -679,17 +677,20 @@ int hibernate(void)
679 677
680 678
681/** 679/**
682 * software_resume - Resume from a saved image. 680 * software_resume - Resume from a saved hibernation image.
683 * 681 *
684 * Called as a late_initcall (so all devices are discovered and 682 * This routine is called as a late initcall, when all devices have been
685 * initialized), we call swsusp to see if we have a saved image or not. 683 * discovered and initialized already.
686 * If so, we quiesce devices, the restore the saved image. We will
687 * return above (in hibernate() ) if everything goes well.
688 * Otherwise, we fail gracefully and return to the normally
689 * scheduled program.
690 * 684 *
685 * The image reading code is called to see if there is a hibernation image
686 * available for reading. If that is the case, devices are quiesced and the
687 * contents of memory is restored from the saved image.
688 *
689 * If this is successful, control reappears in the restored target kernel in
 690 * hibernation_snapshot() which returns to hibernate(). Otherwise, the routine
691 * attempts to recover gracefully and make the kernel return to the normal mode
692 * of operation.
691 */ 693 */
692
693static int software_resume(void) 694static int software_resume(void)
694{ 695{
695 int error; 696 int error;
@@ -819,21 +820,17 @@ static const char * const hibernation_modes[] = {
819 [HIBERNATION_TESTPROC] = "testproc", 820 [HIBERNATION_TESTPROC] = "testproc",
820}; 821};
821 822
822/** 823/*
823 * disk - Control hibernation mode 824 * /sys/power/disk - Control hibernation mode.
824 *
825 * Suspend-to-disk can be handled in several ways. We have a few options
826 * for putting the system to sleep - using the platform driver (e.g. ACPI
827 * or other hibernation_ops), powering off the system or rebooting the
828 * system (for testing) as well as the two test modes.
829 * 825 *
830 * The system can support 'platform', and that is known a priori (and 826 * Hibernation can be handled in several ways. There are a few different ways
831 * encoded by the presence of hibernation_ops). However, the user may 827 * to put the system into the sleep state: using the platform driver (e.g. ACPI
832 * choose 'shutdown' or 'reboot' as alternatives, as well as one fo the 828 * or other hibernation_ops), powering it off or rebooting it (for testing
833 * test modes, 'test' or 'testproc'. 829 * mostly), or using one of the two available test modes.
834 * 830 *
835 * show() will display what the mode is currently set to. 831 * The sysfs file /sys/power/disk provides an interface for selecting the
836 * store() will accept one of 832 * hibernation mode to use. Reading from this file causes the available modes
833 * to be printed. There are 5 modes that can be supported:
837 * 834 *
838 * 'platform' 835 * 'platform'
839 * 'shutdown' 836 * 'shutdown'
@@ -841,8 +838,14 @@ static const char * const hibernation_modes[] = {
841 * 'test' 838 * 'test'
842 * 'testproc' 839 * 'testproc'
843 * 840 *
844 * It will only change to 'platform' if the system 841 * If a platform hibernation driver is in use, 'platform' will be supported
845 * supports it (as determined by having hibernation_ops). 842 * and will be used by default. Otherwise, 'shutdown' will be used by default.
843 * The selected option (i.e. the one corresponding to the current value of
 844 * hibernation_mode) is enclosed in square brackets.
845 *
846 * To select a given hibernation mode it is necessary to write the mode's
847 * string representation (as returned by reading from /sys/power/disk) back
848 * into /sys/power/disk.
846 */ 849 */
847 850
848static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr, 851static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -875,7 +878,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
875 return buf-start; 878 return buf-start;
876} 879}
877 880
878
879static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr, 881static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
880 const char *buf, size_t n) 882 const char *buf, size_t n)
881{ 883{
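
The rewritten /sys/power/disk comment above doubles as a how-to. A small user-space sketch of that interface (illustrative only; the mode written back must be one of the strings listed by the read):

#include <stdio.h>

int main(void)
{
	char modes[128];
	FILE *f = fopen("/sys/power/disk", "r+");

	if (!f)
		return 1;
	if (fgets(modes, sizeof(modes), f))
		printf("available: %s", modes);	/* active mode shown in [brackets] */
	rewind(f);
	fputs("shutdown", f);			/* select the 'shutdown' mode */
	fclose(f);
	return 0;
}
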
diff --git a/kernel/profile.c b/kernel/profile.c
index 14c9f87b9fc9..961b389fe52f 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -303,14 +303,12 @@ static void profile_discard_flip_buffers(void)
303 mutex_unlock(&profile_flip_mutex); 303 mutex_unlock(&profile_flip_mutex);
304} 304}
305 305
306void profile_hits(int type, void *__pc, unsigned int nr_hits) 306static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
307{ 307{
308 unsigned long primary, secondary, flags, pc = (unsigned long)__pc; 308 unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
309 int i, j, cpu; 309 int i, j, cpu;
310 struct profile_hit *hits; 310 struct profile_hit *hits;
311 311
312 if (prof_on != type || !prof_buffer)
313 return;
314 pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1); 312 pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
315 i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT; 313 i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
316 secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT; 314 secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
@@ -417,16 +415,20 @@ out_free:
417#define profile_discard_flip_buffers() do { } while (0) 415#define profile_discard_flip_buffers() do { } while (0)
418#define profile_cpu_callback NULL 416#define profile_cpu_callback NULL
419 417
420void profile_hits(int type, void *__pc, unsigned int nr_hits) 418static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
421{ 419{
422 unsigned long pc; 420 unsigned long pc;
423
424 if (prof_on != type || !prof_buffer)
425 return;
426 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; 421 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
427 atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); 422 atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
428} 423}
429#endif /* !CONFIG_SMP */ 424#endif /* !CONFIG_SMP */
425
426void profile_hits(int type, void *__pc, unsigned int nr_hits)
427{
428 if (prof_on != type || !prof_buffer)
429 return;
430 do_profile_hits(type, __pc, nr_hits);
431}
430EXPORT_SYMBOL_GPL(profile_hits); 432EXPORT_SYMBOL_GPL(profile_hits);
431 433
432void profile_tick(int type) 434void profile_tick(int type)
diff --git a/kernel/sched.c b/kernel/sched.c
index 2d12893b8b0f..5e43e9dc65d1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8764,42 +8764,10 @@ cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
8764 return 0; 8764 return 0;
8765} 8765}
8766 8766
8767static int
8768cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
8769 struct task_struct *tsk, bool threadgroup)
8770{
8771 int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
8772 if (retval)
8773 return retval;
8774 if (threadgroup) {
8775 struct task_struct *c;
8776 rcu_read_lock();
8777 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
8778 retval = cpu_cgroup_can_attach_task(cgrp, c);
8779 if (retval) {
8780 rcu_read_unlock();
8781 return retval;
8782 }
8783 }
8784 rcu_read_unlock();
8785 }
8786 return 0;
8787}
8788
8789static void 8767static void
8790cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 8768cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
8791 struct cgroup *old_cont, struct task_struct *tsk,
8792 bool threadgroup)
8793{ 8769{
8794 sched_move_task(tsk); 8770 sched_move_task(tsk);
8795 if (threadgroup) {
8796 struct task_struct *c;
8797 rcu_read_lock();
8798 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
8799 sched_move_task(c);
8800 }
8801 rcu_read_unlock();
8802 }
8803} 8771}
8804 8772
8805static void 8773static void
@@ -8887,8 +8855,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
8887 .name = "cpu", 8855 .name = "cpu",
8888 .create = cpu_cgroup_create, 8856 .create = cpu_cgroup_create,
8889 .destroy = cpu_cgroup_destroy, 8857 .destroy = cpu_cgroup_destroy,
8890 .can_attach = cpu_cgroup_can_attach, 8858 .can_attach_task = cpu_cgroup_can_attach_task,
8891 .attach = cpu_cgroup_attach, 8859 .attach_task = cpu_cgroup_attach_task,
8892 .exit = cpu_cgroup_exit, 8860 .exit = cpu_cgroup_exit,
8893 .populate = cpu_cgroup_populate, 8861 .populate = cpu_cgroup_populate,
8894 .subsys_id = cpu_cgroup_subsys_id, 8862 .subsys_id = cpu_cgroup_subsys_id,
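
Both the cpuset and cpu controllers above now hand per-task work to the new can_attach_task/attach_task callbacks and let the cgroup core walk the thread group itself. A minimal sketch of a subsystem wired to these hooks (signatures taken from the cpu controller above; the subsystem is hypothetical and create/destroy/subsys_id boilerplate is omitted):

static int foo_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	return 0;		/* called once per task that is being moved */
}

static void foo_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	/* per-task attach work; no open-coded thread_group loop needed */
}

struct cgroup_subsys foo_subsys = {
	.name			= "foo",
	.can_attach_task	= foo_can_attach_task,
	.attach_task		= foo_attach_task,
};
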
diff --git a/lib/Kconfig b/lib/Kconfig
index 9c10e38fc609..830181cc7a83 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -19,16 +19,6 @@ config RATIONAL
19config GENERIC_FIND_FIRST_BIT 19config GENERIC_FIND_FIRST_BIT
20 bool 20 bool
21 21
22config GENERIC_FIND_NEXT_BIT
23 bool
24
25config GENERIC_FIND_BIT_LE
26 bool
27
28config GENERIC_FIND_LAST_BIT
29 bool
30 default y
31
32config CRC_CCITT 22config CRC_CCITT
33 tristate "CRC-CCITT functions" 23 tristate "CRC-CCITT functions"
34 help 24 help
diff --git a/lib/Makefile b/lib/Makefile
index 4b49a249064b..6b597fdb1898 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o prio_heap.o ratelimit.o show_mem.o \ 14 proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o 15 is_single_threaded.o plist.o decompress.o find_next_bit.o
16 16
17lib-$(CONFIG_MMU) += ioremap.o 17lib-$(CONFIG_MMU) += ioremap.o
18lib-$(CONFIG_SMP) += cpumask.o 18lib-$(CONFIG_SMP) += cpumask.o
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
24 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ 24 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
25 bsearch.o 25 bsearch.o find_last_bit.o
26obj-y += kstrtox.o 26obj-y += kstrtox.o
27obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 27obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
28 28
@@ -39,10 +39,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
39obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o 39obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
40lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o 40lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
41lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 41lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
42lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
43lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
44lib-$(CONFIG_GENERIC_FIND_BIT_LE) += find_next_bit.o
45obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
46 42
47CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) 43CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
48obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 44obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
index 5d202e36bdd8..d903959ad695 100644
--- a/lib/find_last_bit.c
+++ b/lib/find_last_bit.c
@@ -15,6 +15,8 @@
15#include <asm/types.h> 15#include <asm/types.h>
16#include <asm/byteorder.h> 16#include <asm/byteorder.h>
17 17
18#ifndef find_last_bit
19
18unsigned long find_last_bit(const unsigned long *addr, unsigned long size) 20unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
19{ 21{
20 unsigned long words; 22 unsigned long words;
@@ -43,3 +45,5 @@ found:
43 return size; 45 return size;
44} 46}
45EXPORT_SYMBOL(find_last_bit); 47EXPORT_SYMBOL(find_last_bit);
48
49#endif
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index b0a8767282bf..4bd75a73ba00 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -16,7 +16,7 @@
16 16
17#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) 17#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
18 18
19#ifdef CONFIG_GENERIC_FIND_NEXT_BIT 19#ifndef find_next_bit
20/* 20/*
21 * Find the next set bit in a memory region. 21 * Find the next set bit in a memory region.
22 */ 22 */
@@ -59,7 +59,9 @@ found_middle:
59 return result + __ffs(tmp); 59 return result + __ffs(tmp);
60} 60}
61EXPORT_SYMBOL(find_next_bit); 61EXPORT_SYMBOL(find_next_bit);
62#endif
62 63
64#ifndef find_next_zero_bit
63/* 65/*
64 * This implementation of find_{first,next}_zero_bit was stolen from 66 * This implementation of find_{first,next}_zero_bit was stolen from
65 * Linus' asm-alpha/bitops.h. 67 * Linus' asm-alpha/bitops.h.
@@ -103,9 +105,9 @@ found_middle:
103 return result + ffz(tmp); 105 return result + ffz(tmp);
104} 106}
105EXPORT_SYMBOL(find_next_zero_bit); 107EXPORT_SYMBOL(find_next_zero_bit);
106#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ 108#endif
107 109
108#ifdef CONFIG_GENERIC_FIND_FIRST_BIT 110#ifndef find_first_bit
109/* 111/*
110 * Find the first set bit in a memory region. 112 * Find the first set bit in a memory region.
111 */ 113 */
@@ -131,7 +133,9 @@ found:
131 return result + __ffs(tmp); 133 return result + __ffs(tmp);
132} 134}
133EXPORT_SYMBOL(find_first_bit); 135EXPORT_SYMBOL(find_first_bit);
136#endif
134 137
138#ifndef find_first_zero_bit
135/* 139/*
136 * Find the first cleared bit in a memory region. 140 * Find the first cleared bit in a memory region.
137 */ 141 */
@@ -157,10 +161,9 @@ found:
157 return result + ffz(tmp); 161 return result + ffz(tmp);
158} 162}
159EXPORT_SYMBOL(find_first_zero_bit); 163EXPORT_SYMBOL(find_first_zero_bit);
160#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ 164#endif
161 165
162#ifdef __BIG_ENDIAN 166#ifdef __BIG_ENDIAN
163#ifdef CONFIG_GENERIC_FIND_BIT_LE
164 167
165/* include/linux/byteorder does not support "unsigned long" type */ 168/* include/linux/byteorder does not support "unsigned long" type */
166static inline unsigned long ext2_swabp(const unsigned long * x) 169static inline unsigned long ext2_swabp(const unsigned long * x)
@@ -186,6 +189,7 @@ static inline unsigned long ext2_swab(const unsigned long y)
186#endif 189#endif
187} 190}
188 191
192#ifndef find_next_zero_bit_le
189unsigned long find_next_zero_bit_le(const void *addr, unsigned 193unsigned long find_next_zero_bit_le(const void *addr, unsigned
190 long size, unsigned long offset) 194 long size, unsigned long offset)
191{ 195{
@@ -229,7 +233,9 @@ found_middle_swap:
229 return result + ffz(ext2_swab(tmp)); 233 return result + ffz(ext2_swab(tmp));
230} 234}
231EXPORT_SYMBOL(find_next_zero_bit_le); 235EXPORT_SYMBOL(find_next_zero_bit_le);
236#endif
232 237
238#ifndef find_next_bit_le
233unsigned long find_next_bit_le(const void *addr, unsigned 239unsigned long find_next_bit_le(const void *addr, unsigned
234 long size, unsigned long offset) 240 long size, unsigned long offset)
235{ 241{
@@ -274,6 +280,6 @@ found_middle_swap:
274 return result + __ffs(ext2_swab(tmp)); 280 return result + __ffs(ext2_swab(tmp));
275} 281}
276EXPORT_SYMBOL(find_next_bit_le); 282EXPORT_SYMBOL(find_next_bit_le);
283#endif
277 284
278#endif /* CONFIG_GENERIC_FIND_BIT_LE */
279#endif /* __BIG_ENDIAN */ 285#endif /* __BIG_ENDIAN */
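
With the CONFIG_GENERIC_FIND_* symbols gone, the generic bit-search helpers above are always built but compile themselves out behind the new #ifndef guards. A sketch (an assumption about how an architecture would opt out; the names are illustrative) of the override pattern in an asm/bitops.h:

/* architecture-optimised version, implemented elsewhere (e.g. in assembly) */
extern unsigned long _arch_find_next_bit(const unsigned long *addr,
					 unsigned long size,
					 unsigned long offset);

/* defining the macro makes lib/find_next_bit.c skip its generic copy */
#define find_next_bit(addr, size, off)	_arch_find_next_bit(addr, size, off)
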
diff --git a/lib/flex_array.c b/lib/flex_array.c
index cab7621f98aa..9b8b89458c4c 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -24,6 +24,7 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/stddef.h> 25#include <linux/stddef.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/reciprocal_div.h>
27 28
28struct flex_array_part { 29struct flex_array_part {
29 char elements[FLEX_ARRAY_PART_SIZE]; 30 char elements[FLEX_ARRAY_PART_SIZE];
@@ -70,15 +71,15 @@ static inline int elements_fit_in_base(struct flex_array *fa)
70 * Element size | Objects | Objects | 71 * Element size | Objects | Objects |
71 * PAGE_SIZE=4k | 32-bit | 64-bit | 72 * PAGE_SIZE=4k | 32-bit | 64-bit |
72 * ---------------------------------| 73 * ---------------------------------|
73 * 1 bytes | 4186112 | 2093056 | 74 * 1 bytes | 4177920 | 2088960 |
74 * 2 bytes | 2093056 | 1046528 | 75 * 2 bytes | 2088960 | 1044480 |
75 * 3 bytes | 1395030 | 697515 | 76 * 3 bytes | 1392300 | 696150 |
76 * 4 bytes | 1046528 | 523264 | 77 * 4 bytes | 1044480 | 522240 |
77 * 32 bytes | 130816 | 65408 | 78 * 32 bytes | 130560 | 65408 |
78 * 33 bytes | 126728 | 63364 | 79 * 33 bytes | 126480 | 63240 |
79 * 2048 bytes | 2044 | 1022 | 80 * 2048 bytes | 2040 | 1020 |
80 * 2049 bytes | 1022 | 511 | 81 * 2049 bytes | 1020 | 510 |
81 * void * | 1046528 | 261632 | 82 * void * | 1044480 | 261120 |
82 * 83 *
83 * Since 64-bit pointers are twice the size, we lose half the 84 * Since 64-bit pointers are twice the size, we lose half the
84 * capacity in the base structure. Also note that no effort is made 85 * capacity in the base structure. Also note that no effort is made
@@ -88,11 +89,15 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
88 gfp_t flags) 89 gfp_t flags)
89{ 90{
90 struct flex_array *ret; 91 struct flex_array *ret;
92 int elems_per_part = 0;
93 int reciprocal_elems = 0;
91 int max_size = 0; 94 int max_size = 0;
92 95
93 if (element_size) 96 if (element_size) {
94 max_size = FLEX_ARRAY_NR_BASE_PTRS * 97 elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
95 FLEX_ARRAY_ELEMENTS_PER_PART(element_size); 98 reciprocal_elems = reciprocal_value(elems_per_part);
99 max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
100 }
96 101
97 /* max_size will end up 0 if element_size > PAGE_SIZE */ 102 /* max_size will end up 0 if element_size > PAGE_SIZE */
98 if (total > max_size) 103 if (total > max_size)
@@ -102,6 +107,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
102 return NULL; 107 return NULL;
103 ret->element_size = element_size; 108 ret->element_size = element_size;
104 ret->total_nr_elements = total; 109 ret->total_nr_elements = total;
110 ret->elems_per_part = elems_per_part;
111 ret->reciprocal_elems = reciprocal_elems;
105 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) 112 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
106 memset(&ret->parts[0], FLEX_ARRAY_FREE, 113 memset(&ret->parts[0], FLEX_ARRAY_FREE,
107 FLEX_ARRAY_BASE_BYTES_LEFT); 114 FLEX_ARRAY_BASE_BYTES_LEFT);
@@ -112,7 +119,7 @@ EXPORT_SYMBOL(flex_array_alloc);
112static int fa_element_to_part_nr(struct flex_array *fa, 119static int fa_element_to_part_nr(struct flex_array *fa,
113 unsigned int element_nr) 120 unsigned int element_nr)
114{ 121{
115 return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size); 122 return reciprocal_divide(element_nr, fa->reciprocal_elems);
116} 123}
117 124
118/** 125/**
@@ -141,12 +148,12 @@ void flex_array_free(struct flex_array *fa)
141EXPORT_SYMBOL(flex_array_free); 148EXPORT_SYMBOL(flex_array_free);
142 149
143static unsigned int index_inside_part(struct flex_array *fa, 150static unsigned int index_inside_part(struct flex_array *fa,
144 unsigned int element_nr) 151 unsigned int element_nr,
152 unsigned int part_nr)
145{ 153{
146 unsigned int part_offset; 154 unsigned int part_offset;
147 155
148 part_offset = element_nr % 156 part_offset = element_nr - part_nr * fa->elems_per_part;
149 FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
150 return part_offset * fa->element_size; 157 return part_offset * fa->element_size;
151} 158}
152 159
@@ -186,7 +193,7 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
186int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, 193int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
187 gfp_t flags) 194 gfp_t flags)
188{ 195{
189 int part_nr; 196 int part_nr = 0;
190 struct flex_array_part *part; 197 struct flex_array_part *part;
191 void *dst; 198 void *dst;
192 199
@@ -202,7 +209,7 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
202 if (!part) 209 if (!part)
203 return -ENOMEM; 210 return -ENOMEM;
204 } 211 }
205 dst = &part->elements[index_inside_part(fa, element_nr)]; 212 dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
206 memcpy(dst, src, fa->element_size); 213 memcpy(dst, src, fa->element_size);
207 return 0; 214 return 0;
208} 215}
@@ -217,7 +224,7 @@ EXPORT_SYMBOL(flex_array_put);
217 */ 224 */
218int flex_array_clear(struct flex_array *fa, unsigned int element_nr) 225int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
219{ 226{
220 int part_nr; 227 int part_nr = 0;
221 struct flex_array_part *part; 228 struct flex_array_part *part;
222 void *dst; 229 void *dst;
223 230
@@ -233,7 +240,7 @@ int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
233 if (!part) 240 if (!part)
234 return -EINVAL; 241 return -EINVAL;
235 } 242 }
236 dst = &part->elements[index_inside_part(fa, element_nr)]; 243 dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
237 memset(dst, FLEX_ARRAY_FREE, fa->element_size); 244 memset(dst, FLEX_ARRAY_FREE, fa->element_size);
238 return 0; 245 return 0;
239} 246}
@@ -302,7 +309,7 @@ EXPORT_SYMBOL(flex_array_prealloc);
302 */ 309 */
303void *flex_array_get(struct flex_array *fa, unsigned int element_nr) 310void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
304{ 311{
305 int part_nr; 312 int part_nr = 0;
306 struct flex_array_part *part; 313 struct flex_array_part *part;
307 314
308 if (!fa->element_size) 315 if (!fa->element_size)
@@ -317,7 +324,7 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
317 if (!part) 324 if (!part)
318 return NULL; 325 return NULL;
319 } 326 }
320 return &part->elements[index_inside_part(fa, element_nr)]; 327 return &part->elements[index_inside_part(fa, element_nr, part_nr)];
321} 328}
322EXPORT_SYMBOL(flex_array_get); 329EXPORT_SYMBOL(flex_array_get);
323 330
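
The cached elems_per_part/reciprocal_elems pair turns the per-access divide and modulo into a reciprocal multiply plus a subtraction. A worked sketch of the new index math (values are illustrative and assume 4k parts, i.e. PAGE_SIZE = 4096):

/*
 * element_size = 32  =>  FLEX_ARRAY_ELEMENTS_PER_PART(32) = 4096 / 32 = 128
 * reciprocal_elems   =  reciprocal_value(128), computed once in flex_array_alloc()
 *
 * For element_nr = 300:
 *	part_nr     = reciprocal_divide(300, reciprocal_elems)	=> 300 / 128 = 2
 *	part_offset = 300 - 2 * elems_per_part			=> 300 - 256 = 44
 *	byte offset = 44 * element_size				=> 1408 into part 2
 */
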
diff --git a/mm/filemap.c b/mm/filemap.c
index 7455ccd8bda8..bcdc393b6580 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1661,6 +1661,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1661 /* No page in the page cache at all */ 1661 /* No page in the page cache at all */
1662 do_sync_mmap_readahead(vma, ra, file, offset); 1662 do_sync_mmap_readahead(vma, ra, file, offset);
1663 count_vm_event(PGMAJFAULT); 1663 count_vm_event(PGMAJFAULT);
1664 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1664 ret = VM_FAULT_MAJOR; 1665 ret = VM_FAULT_MAJOR;
1665retry_find: 1666retry_find:
1666 page = find_get_page(mapping, offset); 1667 page = find_get_page(mapping, offset);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5fd3dcd3f2e..bd9052a5d3ad 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -94,6 +94,8 @@ enum mem_cgroup_events_index {
94 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */ 94 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
95 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */ 95 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
96 MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */ 96 MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */
97 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
98 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
97 MEM_CGROUP_EVENTS_NSTATS, 99 MEM_CGROUP_EVENTS_NSTATS,
98}; 100};
99/* 101/*
@@ -231,6 +233,11 @@ struct mem_cgroup {
231 * reclaimed from. 233 * reclaimed from.
232 */ 234 */
233 int last_scanned_child; 235 int last_scanned_child;
236 int last_scanned_node;
237#if MAX_NUMNODES > 1
238 nodemask_t scan_nodes;
239 unsigned long next_scan_node_update;
240#endif
234 /* 241 /*
235 * Should the accounting and control be hierarchical, per subtree? 242 * Should the accounting and control be hierarchical, per subtree?
236 */ 243 */
@@ -585,6 +592,16 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
585 this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); 592 this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
586} 593}
587 594
595void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
596{
597 this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
598}
599
600void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
601{
602 this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
603}
604
588static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem, 605static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
589 enum mem_cgroup_events_index idx) 606 enum mem_cgroup_events_index idx)
590{ 607{
@@ -624,18 +641,27 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
624 preempt_enable(); 641 preempt_enable();
625} 642}
626 643
644static unsigned long
645mem_cgroup_get_zonestat_node(struct mem_cgroup *mem, int nid, enum lru_list idx)
646{
647 struct mem_cgroup_per_zone *mz;
648 u64 total = 0;
649 int zid;
650
651 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
652 mz = mem_cgroup_zoneinfo(mem, nid, zid);
653 total += MEM_CGROUP_ZSTAT(mz, idx);
654 }
655 return total;
656}
627static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem, 657static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
628 enum lru_list idx) 658 enum lru_list idx)
629{ 659{
630 int nid, zid; 660 int nid;
631 struct mem_cgroup_per_zone *mz;
632 u64 total = 0; 661 u64 total = 0;
633 662
634 for_each_online_node(nid) 663 for_each_online_node(nid)
635 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 664 total += mem_cgroup_get_zonestat_node(mem, nid, idx);
636 mz = mem_cgroup_zoneinfo(mem, nid, zid);
637 total += MEM_CGROUP_ZSTAT(mz, idx);
638 }
639 return total; 665 return total;
640} 666}
641 667
@@ -813,6 +839,33 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
813 return (mem == root_mem_cgroup); 839 return (mem == root_mem_cgroup);
814} 840}
815 841
842void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
843{
844 struct mem_cgroup *mem;
845
846 if (!mm)
847 return;
848
849 rcu_read_lock();
850 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
851 if (unlikely(!mem))
852 goto out;
853
854 switch (idx) {
855 case PGMAJFAULT:
856 mem_cgroup_pgmajfault(mem, 1);
857 break;
858 case PGFAULT:
859 mem_cgroup_pgfault(mem, 1);
860 break;
861 default:
862 BUG();
863 }
864out:
865 rcu_read_unlock();
866}
867EXPORT_SYMBOL(mem_cgroup_count_vm_event);
868
816/* 869/*
817 * Following LRU functions are allowed to be used without PCG_LOCK. 870 * Following LRU functions are allowed to be used without PCG_LOCK.
818 * Operations are called by routine of global LRU independently from memcg. 871 * Operations are called by routine of global LRU independently from memcg.
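
mem_cgroup_count_vm_event() mirrors the global VM event counters inside the owning memcg; the filemap_fault() hunk earlier in this diff shows the PGMAJFAULT side. A sketch (an assumption, for illustration only) of the ordinary-fault counterpart a fault path would use:

static inline void account_pgfault(struct mm_struct *mm)
{
	count_vm_event(PGFAULT);			/* global counter */
	mem_cgroup_count_vm_event(mm, PGFAULT);		/* per-memcg counter */
}
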
@@ -1064,9 +1117,9 @@ int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
1064 return (active > inactive); 1117 return (active > inactive);
1065} 1118}
1066 1119
1067unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, 1120unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
1068 struct zone *zone, 1121 struct zone *zone,
1069 enum lru_list lru) 1122 enum lru_list lru)
1070{ 1123{
1071 int nid = zone_to_nid(zone); 1124 int nid = zone_to_nid(zone);
1072 int zid = zone_idx(zone); 1125 int zid = zone_idx(zone);
@@ -1075,6 +1128,93 @@ unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
1075 return MEM_CGROUP_ZSTAT(mz, lru); 1128 return MEM_CGROUP_ZSTAT(mz, lru);
1076} 1129}
1077 1130
1131#ifdef CONFIG_NUMA
1132static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
1133 int nid)
1134{
1135 unsigned long ret;
1136
1137 ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_FILE) +
1138 mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_FILE);
1139
1140 return ret;
1141}
1142
1143static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
1144{
1145 u64 total = 0;
1146 int nid;
1147
1148 for_each_node_state(nid, N_HIGH_MEMORY)
1149 total += mem_cgroup_node_nr_file_lru_pages(memcg, nid);
1150
1151 return total;
1152}
1153
1154static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
1155 int nid)
1156{
1157 unsigned long ret;
1158
1159 ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
1160 mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
1161
1162 return ret;
1163}
1164
1165static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg)
1166{
1167 u64 total = 0;
1168 int nid;
1169
1170 for_each_node_state(nid, N_HIGH_MEMORY)
1171 total += mem_cgroup_node_nr_anon_lru_pages(memcg, nid);
1172
1173 return total;
1174}
1175
1176static unsigned long
1177mem_cgroup_node_nr_unevictable_lru_pages(struct mem_cgroup *memcg, int nid)
1178{
1179 return mem_cgroup_get_zonestat_node(memcg, nid, LRU_UNEVICTABLE);
1180}
1181
1182static unsigned long
1183mem_cgroup_nr_unevictable_lru_pages(struct mem_cgroup *memcg)
1184{
1185 u64 total = 0;
1186 int nid;
1187
1188 for_each_node_state(nid, N_HIGH_MEMORY)
1189 total += mem_cgroup_node_nr_unevictable_lru_pages(memcg, nid);
1190
1191 return total;
1192}
1193
1194static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
1195 int nid)
1196{
1197 enum lru_list l;
1198 u64 total = 0;
1199
1200 for_each_lru(l)
1201 total += mem_cgroup_get_zonestat_node(memcg, nid, l);
1202
1203 return total;
1204}
1205
1206static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg)
1207{
1208 u64 total = 0;
1209 int nid;
1210
1211 for_each_node_state(nid, N_HIGH_MEMORY)
1212 total += mem_cgroup_node_nr_lru_pages(memcg, nid);
1213
1214 return total;
1215}
1216#endif /* CONFIG_NUMA */
1217
1078struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, 1218struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1079 struct zone *zone) 1219 struct zone *zone)
1080{ 1220{
@@ -1418,6 +1558,81 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1418 return ret; 1558 return ret;
1419} 1559}
1420 1560
1561#if MAX_NUMNODES > 1
1562
1563/*
1564 * Always updating the nodemask is not very good - even if we have an empty
1565 * list or the wrong list here, we can start from some node and traverse all
1566 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1567 *
1568 */
1569static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
1570{
1571 int nid;
1572
1573 if (time_after(mem->next_scan_node_update, jiffies))
1574 return;
1575
1576 mem->next_scan_node_update = jiffies + 10*HZ;
1577 /* make a nodemask where this memcg uses memory from */
1578 mem->scan_nodes = node_states[N_HIGH_MEMORY];
1579
1580 for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
1581
1582 if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) ||
1583 mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE))
1584 continue;
1585
1586 if (total_swap_pages &&
1587 (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) ||
1588 mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON)))
1589 continue;
1590 node_clear(nid, mem->scan_nodes);
1591 }
1592}
1593
1594/*
 1595 * Selecting a node to start reclaim from. Because what we need is just to
 1596 * reduce the usage counter, starting from anywhere is OK. Considering
 1597 * memory reclaim from the current node, there are pros and cons.
1598 *
 1599 * Freeing memory from the current node means freeing memory from a node which
 1600 * we'll use or have used. So, it may degrade LRU behavior. And if several
 1601 * threads hit their limits, they will see contention on one node. But freeing
 1602 * from a remote node costs more for memory reclaim because of memory latency.
1603 *
1604 * Now, we use round-robin. Better algorithm is welcomed.
1605 */
1606int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
1607{
1608 int node;
1609
1610 mem_cgroup_may_update_nodemask(mem);
1611 node = mem->last_scanned_node;
1612
1613 node = next_node(node, mem->scan_nodes);
1614 if (node == MAX_NUMNODES)
1615 node = first_node(mem->scan_nodes);
1616 /*
 1617 * We call this when we hit the limit, not when pages are added to the LRU.
 1618 * No LRU may hold pages because all pages are UNEVICTABLE or the
 1619 * memcg is too small and no pages are on the LRU. In that case,
 1620 * we use the current node.
1621 */
1622 if (unlikely(node == MAX_NUMNODES))
1623 node = numa_node_id();
1624
1625 mem->last_scanned_node = node;
1626 return node;
1627}
1628
1629#else
1630int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
1631{
1632 return 0;
1633}
1634#endif
1635
1421/* 1636/*
1422 * Scan the hierarchy if needed to reclaim memory. We remember the last child 1637 * Scan the hierarchy if needed to reclaim memory. We remember the last child
1423 * we reclaimed from, so that we don't end up penalizing one child extensively 1638 * we reclaimed from, so that we don't end up penalizing one child extensively
@@ -1433,7 +1648,8 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1433static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, 1648static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1434 struct zone *zone, 1649 struct zone *zone,
1435 gfp_t gfp_mask, 1650 gfp_t gfp_mask,
1436 unsigned long reclaim_options) 1651 unsigned long reclaim_options,
1652 unsigned long *total_scanned)
1437{ 1653{
1438 struct mem_cgroup *victim; 1654 struct mem_cgroup *victim;
1439 int ret, total = 0; 1655 int ret, total = 0;
@@ -1442,6 +1658,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1442 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; 1658 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1443 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; 1659 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1444 unsigned long excess; 1660 unsigned long excess;
1661 unsigned long nr_scanned;
1445 1662
1446 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1663 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
1447 1664
@@ -1484,10 +1701,12 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1484 continue; 1701 continue;
1485 } 1702 }
1486 /* we use swappiness of local cgroup */ 1703 /* we use swappiness of local cgroup */
1487 if (check_soft) 1704 if (check_soft) {
1488 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, 1705 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1489 noswap, get_swappiness(victim), zone); 1706 noswap, get_swappiness(victim), zone,
1490 else 1707 &nr_scanned);
1708 *total_scanned += nr_scanned;
1709 } else
1491 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, 1710 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1492 noswap, get_swappiness(victim)); 1711 noswap, get_swappiness(victim));
1493 css_put(&victim->css); 1712 css_put(&victim->css);
@@ -1503,7 +1722,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1503 if (!res_counter_soft_limit_excess(&root_mem->res)) 1722 if (!res_counter_soft_limit_excess(&root_mem->res))
1504 return total; 1723 return total;
1505 } else if (mem_cgroup_margin(root_mem)) 1724 } else if (mem_cgroup_margin(root_mem))
1506 return 1 + total; 1725 return total;
1507 } 1726 }
1508 return total; 1727 return total;
1509} 1728}
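
The new total_scanned argument is an output parameter: pages reclaimed stay in the return value, pages scanned are reported through the pointer. The hard-limit callers changed further down pass NULL, which is safe only because the pointer is dereferenced inside the check_soft branch that those callers never take. A compact, hedged illustration of that convention (hypothetical names, not the patch itself):

#include <stdio.h>
#include <stddef.h>

/*
 * Reclaim-style helper: pages reclaimed is the return value; the
 * amount scanned is reported through an optional output pointer
 * that only soft-limit callers supply.
 */
static unsigned long do_reclaim(int soft, unsigned long *total_scanned)
{
	unsigned long reclaimed = 0, scanned = 0;

	/* ... a real implementation would scan LRU lists here ... */

	if (soft)
		*total_scanned += scanned;  /* dereferenced only on the soft path */
	return reclaimed;
}

int main(void)
{
	unsigned long scanned = 0;

	do_reclaim(0, NULL);       /* hard-limit caller: pointer unused */
	do_reclaim(1, &scanned);   /* soft-limit caller: pointer required */
	printf("scanned=%lu\n", scanned);
	return 0;
}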
@@ -1928,7 +2147,7 @@ static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
1928 return CHARGE_WOULDBLOCK; 2147 return CHARGE_WOULDBLOCK;
1929 2148
1930 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, 2149 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1931 gfp_mask, flags); 2150 gfp_mask, flags, NULL);
1932 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2151 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1933 return CHARGE_RETRY; 2152 return CHARGE_RETRY;
1934 /* 2153 /*
@@ -3211,7 +3430,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3211 break; 3430 break;
3212 3431
3213 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, 3432 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3214 MEM_CGROUP_RECLAIM_SHRINK); 3433 MEM_CGROUP_RECLAIM_SHRINK,
3434 NULL);
3215 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3435 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3216 /* Usage is reduced ? */ 3436 /* Usage is reduced ? */
3217 if (curusage >= oldusage) 3437 if (curusage >= oldusage)
@@ -3271,7 +3491,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3271 3491
3272 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, 3492 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3273 MEM_CGROUP_RECLAIM_NOSWAP | 3493 MEM_CGROUP_RECLAIM_NOSWAP |
3274 MEM_CGROUP_RECLAIM_SHRINK); 3494 MEM_CGROUP_RECLAIM_SHRINK,
3495 NULL);
3275 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3496 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3276 /* Usage is reduced ? */ 3497 /* Usage is reduced ? */
3277 if (curusage >= oldusage) 3498 if (curusage >= oldusage)
@@ -3285,7 +3506,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3285} 3506}
3286 3507
3287unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 3508unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3288 gfp_t gfp_mask) 3509 gfp_t gfp_mask,
3510 unsigned long *total_scanned)
3289{ 3511{
3290 unsigned long nr_reclaimed = 0; 3512 unsigned long nr_reclaimed = 0;
3291 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 3513 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
@@ -3293,6 +3515,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3293 int loop = 0; 3515 int loop = 0;
3294 struct mem_cgroup_tree_per_zone *mctz; 3516 struct mem_cgroup_tree_per_zone *mctz;
3295 unsigned long long excess; 3517 unsigned long long excess;
3518 unsigned long nr_scanned;
3296 3519
3297 if (order > 0) 3520 if (order > 0)
3298 return 0; 3521 return 0;
@@ -3311,10 +3534,13 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3311 if (!mz) 3534 if (!mz)
3312 break; 3535 break;
3313 3536
3537 nr_scanned = 0;
3314 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone, 3538 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3315 gfp_mask, 3539 gfp_mask,
3316 MEM_CGROUP_RECLAIM_SOFT); 3540 MEM_CGROUP_RECLAIM_SOFT,
3541 &nr_scanned);
3317 nr_reclaimed += reclaimed; 3542 nr_reclaimed += reclaimed;
3543 *total_scanned += nr_scanned;
3318 spin_lock(&mctz->lock); 3544 spin_lock(&mctz->lock);
3319 3545
3320 /* 3546 /*
@@ -3337,10 +3563,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3337 */ 3563 */
3338 next_mz = 3564 next_mz =
3339 __mem_cgroup_largest_soft_limit_node(mctz); 3565 __mem_cgroup_largest_soft_limit_node(mctz);
3340 if (next_mz == mz) { 3566 if (next_mz == mz)
3341 css_put(&next_mz->mem->css); 3567 css_put(&next_mz->mem->css);
3342 next_mz = NULL; 3568 else /* next_mz == NULL or other memcg */
3343 } else /* next_mz == NULL or other memcg */
3344 break; 3569 break;
3345 } while (1); 3570 } while (1);
3346 } 3571 }
@@ -3772,6 +3997,8 @@ enum {
3772 MCS_PGPGIN, 3997 MCS_PGPGIN,
3773 MCS_PGPGOUT, 3998 MCS_PGPGOUT,
3774 MCS_SWAP, 3999 MCS_SWAP,
4000 MCS_PGFAULT,
4001 MCS_PGMAJFAULT,
3775 MCS_INACTIVE_ANON, 4002 MCS_INACTIVE_ANON,
3776 MCS_ACTIVE_ANON, 4003 MCS_ACTIVE_ANON,
3777 MCS_INACTIVE_FILE, 4004 MCS_INACTIVE_FILE,
@@ -3794,6 +4021,8 @@ struct {
3794 {"pgpgin", "total_pgpgin"}, 4021 {"pgpgin", "total_pgpgin"},
3795 {"pgpgout", "total_pgpgout"}, 4022 {"pgpgout", "total_pgpgout"},
3796 {"swap", "total_swap"}, 4023 {"swap", "total_swap"},
4024 {"pgfault", "total_pgfault"},
4025 {"pgmajfault", "total_pgmajfault"},
3797 {"inactive_anon", "total_inactive_anon"}, 4026 {"inactive_anon", "total_inactive_anon"},
3798 {"active_anon", "total_active_anon"}, 4027 {"active_anon", "total_active_anon"},
3799 {"inactive_file", "total_inactive_file"}, 4028 {"inactive_file", "total_inactive_file"},
@@ -3822,6 +4051,10 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3822 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); 4051 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3823 s->stat[MCS_SWAP] += val * PAGE_SIZE; 4052 s->stat[MCS_SWAP] += val * PAGE_SIZE;
3824 } 4053 }
4054 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
4055 s->stat[MCS_PGFAULT] += val;
4056 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
4057 s->stat[MCS_PGMAJFAULT] += val;
3825 4058
3826 /* per zone stat */ 4059 /* per zone stat */
3827 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON); 4060 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
@@ -3845,6 +4078,51 @@ mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3845 mem_cgroup_get_local_stat(iter, s); 4078 mem_cgroup_get_local_stat(iter, s);
3846} 4079}
3847 4080
4081#ifdef CONFIG_NUMA
4082static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4083{
4084 int nid;
4085 unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4086 unsigned long node_nr;
4087 struct cgroup *cont = m->private;
4088 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4089
4090 total_nr = mem_cgroup_nr_lru_pages(mem_cont);
4091 seq_printf(m, "total=%lu", total_nr);
4092 for_each_node_state(nid, N_HIGH_MEMORY) {
4093 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid);
4094 seq_printf(m, " N%d=%lu", nid, node_nr);
4095 }
4096 seq_putc(m, '\n');
4097
4098 file_nr = mem_cgroup_nr_file_lru_pages(mem_cont);
4099 seq_printf(m, "file=%lu", file_nr);
4100 for_each_node_state(nid, N_HIGH_MEMORY) {
4101 node_nr = mem_cgroup_node_nr_file_lru_pages(mem_cont, nid);
4102 seq_printf(m, " N%d=%lu", nid, node_nr);
4103 }
4104 seq_putc(m, '\n');
4105
4106 anon_nr = mem_cgroup_nr_anon_lru_pages(mem_cont);
4107 seq_printf(m, "anon=%lu", anon_nr);
4108 for_each_node_state(nid, N_HIGH_MEMORY) {
4109 node_nr = mem_cgroup_node_nr_anon_lru_pages(mem_cont, nid);
4110 seq_printf(m, " N%d=%lu", nid, node_nr);
4111 }
4112 seq_putc(m, '\n');
4113
4114 unevictable_nr = mem_cgroup_nr_unevictable_lru_pages(mem_cont);
4115 seq_printf(m, "unevictable=%lu", unevictable_nr);
4116 for_each_node_state(nid, N_HIGH_MEMORY) {
4117 node_nr = mem_cgroup_node_nr_unevictable_lru_pages(mem_cont,
4118 nid);
4119 seq_printf(m, " N%d=%lu", nid, node_nr);
4120 }
4121 seq_putc(m, '\n');
4122 return 0;
4123}
4124#endif /* CONFIG_NUMA */
4125
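mem_control_numa_stat_show() above emits one line per statistic in the form "name=total N0=value N1=value ...". A small standalone program reproducing that format, with invented page counts for a hypothetical two-node machine:

#include <stdio.h>

/* Emit one "name=total N0=v0 N1=v1 ..." line, numa_stat style. */
static void print_stat_line(const char *name, unsigned long total,
			    const unsigned long *per_node, int nr_nodes)
{
	printf("%s=%lu", name, total);
	for (int nid = 0; nid < nr_nodes; nid++)
		printf(" N%d=%lu", nid, per_node[nid]);
	putchar('\n');
}

int main(void)
{
	/* made-up per-node LRU page counts, two nodes */
	unsigned long total[]       = { 1500, 950 };
	unsigned long file[]        = { 1200, 800 };
	unsigned long anon[]        = { 300, 150 };
	unsigned long unevictable[] = { 0, 0 };

	print_stat_line("total", 2450, total, 2);
	print_stat_line("file", 2000, file, 2);
	print_stat_line("anon", 450, anon, 2);
	print_stat_line("unevictable", 0, unevictable, 2);
	return 0;
}
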
3848static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, 4126static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3849 struct cgroup_map_cb *cb) 4127 struct cgroup_map_cb *cb)
3850{ 4128{
@@ -3855,6 +4133,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3855 memset(&mystat, 0, sizeof(mystat)); 4133 memset(&mystat, 0, sizeof(mystat));
3856 mem_cgroup_get_local_stat(mem_cont, &mystat); 4134 mem_cgroup_get_local_stat(mem_cont, &mystat);
3857 4135
4136
3858 for (i = 0; i < NR_MCS_STAT; i++) { 4137 for (i = 0; i < NR_MCS_STAT; i++) {
3859 if (i == MCS_SWAP && !do_swap_account) 4138 if (i == MCS_SWAP && !do_swap_account)
3860 continue; 4139 continue;
@@ -4278,6 +4557,22 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4278 return 0; 4557 return 0;
4279} 4558}
4280 4559
4560#ifdef CONFIG_NUMA
4561static const struct file_operations mem_control_numa_stat_file_operations = {
4562 .read = seq_read,
4563 .llseek = seq_lseek,
4564 .release = single_release,
4565};
4566
4567static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4568{
4569 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4570
4571 file->f_op = &mem_control_numa_stat_file_operations;
4572 return single_open(file, mem_control_numa_stat_show, cont);
4573}
4574#endif /* CONFIG_NUMA */
4575
4281static struct cftype mem_cgroup_files[] = { 4576static struct cftype mem_cgroup_files[] = {
4282 { 4577 {
4283 .name = "usage_in_bytes", 4578 .name = "usage_in_bytes",
@@ -4341,6 +4636,12 @@ static struct cftype mem_cgroup_files[] = {
4341 .unregister_event = mem_cgroup_oom_unregister_event, 4636 .unregister_event = mem_cgroup_oom_unregister_event,
4342 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4637 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4343 }, 4638 },
4639#ifdef CONFIG_NUMA
4640 {
4641 .name = "numa_stat",
4642 .open = mem_control_numa_stat_open,
4643 },
4644#endif
4344}; 4645};
4345 4646
4346#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4647#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -4596,6 +4897,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4596 res_counter_init(&mem->memsw, NULL); 4897 res_counter_init(&mem->memsw, NULL);
4597 } 4898 }
4598 mem->last_scanned_child = 0; 4899 mem->last_scanned_child = 0;
4900 mem->last_scanned_node = MAX_NUMNODES;
4599 INIT_LIST_HEAD(&mem->oom_notify); 4901 INIT_LIST_HEAD(&mem->oom_notify);
4600 4902
4601 if (parent) 4903 if (parent)
@@ -4953,8 +5255,7 @@ static void mem_cgroup_clear_mc(void)
4953 5255
4954static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5256static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4955 struct cgroup *cgroup, 5257 struct cgroup *cgroup,
4956 struct task_struct *p, 5258 struct task_struct *p)
4957 bool threadgroup)
4958{ 5259{
4959 int ret = 0; 5260 int ret = 0;
4960 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); 5261 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
@@ -4993,8 +5294,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4993 5294
4994static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5295static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4995 struct cgroup *cgroup, 5296 struct cgroup *cgroup,
4996 struct task_struct *p, 5297 struct task_struct *p)
4997 bool threadgroup)
4998{ 5298{
4999 mem_cgroup_clear_mc(); 5299 mem_cgroup_clear_mc();
5000} 5300}
@@ -5112,8 +5412,7 @@ retry:
5112static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5412static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5113 struct cgroup *cont, 5413 struct cgroup *cont,
5114 struct cgroup *old_cont, 5414 struct cgroup *old_cont,
5115 struct task_struct *p, 5415 struct task_struct *p)
5116 bool threadgroup)
5117{ 5416{
5118 struct mm_struct *mm; 5417 struct mm_struct *mm;
5119 5418
@@ -5131,22 +5430,19 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5131#else /* !CONFIG_MMU */ 5430#else /* !CONFIG_MMU */
5132static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5431static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5133 struct cgroup *cgroup, 5432 struct cgroup *cgroup,
5134 struct task_struct *p, 5433 struct task_struct *p)
5135 bool threadgroup)
5136{ 5434{
5137 return 0; 5435 return 0;
5138} 5436}
5139static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5437static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5140 struct cgroup *cgroup, 5438 struct cgroup *cgroup,
5141 struct task_struct *p, 5439 struct task_struct *p)
5142 bool threadgroup)
5143{ 5440{
5144} 5441}
5145static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5442static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5146 struct cgroup *cont, 5443 struct cgroup *cont,
5147 struct cgroup *old_cont, 5444 struct cgroup *old_cont,
5148 struct task_struct *p, 5445 struct task_struct *p)
5149 bool threadgroup)
5150{ 5446{
5151} 5447}
5152#endif 5448#endif
diff --git a/mm/memory.c b/mm/memory.c
index fc24f7d788bd..6953d3926e01 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2874,6 +2874,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2874 /* Had to read the page from swap area: Major fault */ 2874 /* Had to read the page from swap area: Major fault */
2875 ret = VM_FAULT_MAJOR; 2875 ret = VM_FAULT_MAJOR;
2876 count_vm_event(PGMAJFAULT); 2876 count_vm_event(PGMAJFAULT);
2877 mem_cgroup_count_vm_event(mm, PGMAJFAULT);
2877 } else if (PageHWPoison(page)) { 2878 } else if (PageHWPoison(page)) {
2878 /* 2879 /*
2879 * hwpoisoned dirty swapcache pages are kept for killing 2880 * hwpoisoned dirty swapcache pages are kept for killing
@@ -3413,6 +3414,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3413 __set_current_state(TASK_RUNNING); 3414 __set_current_state(TASK_RUNNING);
3414 3415
3415 count_vm_event(PGFAULT); 3416 count_vm_event(PGFAULT);
3417 mem_cgroup_count_vm_event(mm, PGFAULT);
3416 3418
3417 /* do counter updates before entering really critical section. */ 3419 /* do counter updates before entering really critical section. */
3418 check_sync_rss_stat(current); 3420 check_sync_rss_stat(current);
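
Both fault paths now bump a per-memcg event next to the existing global VM event; those per-memcg counters are what feed the new pgfault/pgmajfault rows in memory.stat added earlier in this patch. A minimal sketch of the dual-counter pattern, with plain arrays standing in for the kernel's percpu event counters (names are illustrative):

enum vm_event { PGFAULT, PGMAJFAULT, NR_EVENTS };

struct group_stats {
	unsigned long events[NR_EVENTS];
};

static unsigned long global_events[NR_EVENTS];

/* Count the event both globally and against the faulting task's group. */
void count_event(struct group_stats *grp, enum vm_event ev)
{
	global_events[ev]++;
	if (grp)			/* the task may not belong to any group */
		grp->events[ev]++;
}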
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2a00f17c3bf4..a4e1db3f1981 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4323,10 +4323,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4323 zone->zone_pgdat = pgdat; 4323 zone->zone_pgdat = pgdat;
4324 4324
4325 zone_pcp_init(zone); 4325 zone_pcp_init(zone);
4326 for_each_lru(l) { 4326 for_each_lru(l)
4327 INIT_LIST_HEAD(&zone->lru[l].list); 4327 INIT_LIST_HEAD(&zone->lru[l].list);
4328 zone->reclaim_stat.nr_saved_scan[l] = 0;
4329 }
4330 zone->reclaim_stat.recent_rotated[0] = 0; 4328 zone->reclaim_stat.recent_rotated[0] = 0;
4331 zone->reclaim_stat.recent_rotated[1] = 0; 4329 zone->reclaim_stat.recent_rotated[1] = 0;
4332 zone->reclaim_stat.recent_scanned[0] = 0; 4330 zone->reclaim_stat.recent_scanned[0] = 0;
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 2daadc322ba6..74ccff61d1be 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -130,7 +130,7 @@ struct page *lookup_cgroup_page(struct page_cgroup *pc)
130 return page; 130 return page;
131} 131}
132 132
133static void *__init_refok alloc_page_cgroup(size_t size, int nid) 133static void *__meminit alloc_page_cgroup(size_t size, int nid)
134{ 134{
135 void *addr = NULL; 135 void *addr = NULL;
136 136
@@ -162,7 +162,7 @@ static void free_page_cgroup(void *addr)
162} 162}
163#endif 163#endif
164 164
165static int __init_refok init_section_page_cgroup(unsigned long pfn) 165static int __meminit init_section_page_cgroup(unsigned long pfn)
166{ 166{
167 struct page_cgroup *base, *pc; 167 struct page_cgroup *base, *pc;
168 struct mem_section *section; 168 struct mem_section *section;
@@ -475,7 +475,7 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
475 if (!do_swap_account) 475 if (!do_swap_account)
476 return 0; 476 return 0;
477 477
478 length = ((max_pages/SC_PER_PAGE) + 1); 478 length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
479 array_size = length * sizeof(void *); 479 array_size = length * sizeof(void *);
480 480
481 array = vmalloc(array_size); 481 array = vmalloc(array_size);
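
The DIV_ROUND_UP() change is not just cosmetic: the old expression allocated one extra slot whenever max_pages was an exact multiple of SC_PER_PAGE. A quick demonstration (SC_PER_PAGE is taken as 512 purely for illustration):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long sc_per_page = 512;	/* illustrative value only */
	unsigned long cases[] = { 1, 511, 512, 513, 1024 };

	for (int i = 0; i < 5; i++) {
		unsigned long n = cases[i];
		printf("max_pages=%-5lu old=%lu new=%lu\n", n,
		       n / sc_per_page + 1, DIV_ROUND_UP(n, sc_per_page));
	}
	return 0;
}

Only the exact multiples (512, 1024) come out differently, which is exactly the over-allocation the new expression drops.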
@@ -492,8 +492,8 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
492 /* memory shortage */ 492 /* memory shortage */
493 ctrl->map = NULL; 493 ctrl->map = NULL;
494 ctrl->length = 0; 494 ctrl->length = 0;
495 vfree(array);
496 mutex_unlock(&swap_cgroup_mutex); 495 mutex_unlock(&swap_cgroup_mutex);
496 vfree(array);
497 goto nomem; 497 goto nomem;
498 } 498 }
499 mutex_unlock(&swap_cgroup_mutex); 499 mutex_unlock(&swap_cgroup_mutex);
@@ -508,7 +508,8 @@ nomem:
508 508
509void swap_cgroup_swapoff(int type) 509void swap_cgroup_swapoff(int type)
510{ 510{
511 int i; 511 struct page **map;
512 unsigned long i, length;
512 struct swap_cgroup_ctrl *ctrl; 513 struct swap_cgroup_ctrl *ctrl;
513 514
514 if (!do_swap_account) 515 if (!do_swap_account)
@@ -516,17 +517,20 @@ void swap_cgroup_swapoff(int type)
516 517
517 mutex_lock(&swap_cgroup_mutex); 518 mutex_lock(&swap_cgroup_mutex);
518 ctrl = &swap_cgroup_ctrl[type]; 519 ctrl = &swap_cgroup_ctrl[type];
519 if (ctrl->map) { 520 map = ctrl->map;
520 for (i = 0; i < ctrl->length; i++) { 521 length = ctrl->length;
521 struct page *page = ctrl->map[i]; 522 ctrl->map = NULL;
523 ctrl->length = 0;
524 mutex_unlock(&swap_cgroup_mutex);
525
526 if (map) {
527 for (i = 0; i < length; i++) {
528 struct page *page = map[i];
522 if (page) 529 if (page)
523 __free_page(page); 530 __free_page(page);
524 } 531 }
525 vfree(ctrl->map); 532 vfree(map);
526 ctrl->map = NULL;
527 ctrl->length = 0;
528 } 533 }
529 mutex_unlock(&swap_cgroup_mutex);
530} 534}
531 535
532#endif 536#endif
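
Both swap_cgroup hunks above follow the same detach-then-free idiom: the map pointer and length are taken out of the control structure while the mutex is held, and the slow __free_page()/vfree() work happens after the unlock. A self-contained pthread sketch of that idiom (structure and function names are assumptions for illustration):

#include <pthread.h>
#include <stdlib.h>

struct ctrl {
	pthread_mutex_t lock;
	void **map;
	unsigned long length;
};

void ctrl_teardown(struct ctrl *c)
{
	void **map;
	unsigned long i, length;

	pthread_mutex_lock(&c->lock);
	map = c->map;			/* detach under the lock ... */
	length = c->length;
	c->map = NULL;
	c->length = 0;
	pthread_mutex_unlock(&c->lock);

	if (!map)
		return;
	for (i = 0; i < length; i++)	/* ... free outside of it */
		free(map[i]);
	free(map);
}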
diff --git a/mm/shmem.c b/mm/shmem.c
index 69edb45a9f28..1acfb2687bfa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1305,12 +1305,10 @@ repeat:
1305 swappage = lookup_swap_cache(swap); 1305 swappage = lookup_swap_cache(swap);
1306 if (!swappage) { 1306 if (!swappage) {
1307 shmem_swp_unmap(entry); 1307 shmem_swp_unmap(entry);
1308 spin_unlock(&info->lock);
1308 /* here we actually do the io */ 1309 /* here we actually do the io */
1309 if (type && !(*type & VM_FAULT_MAJOR)) { 1310 if (type)
1310 __count_vm_event(PGMAJFAULT);
1311 *type |= VM_FAULT_MAJOR; 1311 *type |= VM_FAULT_MAJOR;
1312 }
1313 spin_unlock(&info->lock);
1314 swappage = shmem_swapin(swap, gfp, info, idx); 1312 swappage = shmem_swapin(swap, gfp, info, idx);
1315 if (!swappage) { 1313 if (!swappage) {
1316 spin_lock(&info->lock); 1314 spin_lock(&info->lock);
@@ -1549,7 +1547,10 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1549 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); 1547 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1550 if (error) 1548 if (error)
1551 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); 1549 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1552 1550 if (ret & VM_FAULT_MAJOR) {
1551 count_vm_event(PGMAJFAULT);
1552 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1553 }
1553 return ret | VM_FAULT_LOCKED; 1554 return ret | VM_FAULT_LOCKED;
1554} 1555}
1555 1556
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b5ccf3158d82..1d34d75366a7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2153,10 +2153,6 @@ struct vm_struct *alloc_vm_area(size_t size)
2153 return NULL; 2153 return NULL;
2154 } 2154 }
2155 2155
2156 /* Make sure the pagetables are constructed in process kernel
2157 mappings */
2158 vmalloc_sync_all();
2159
2160 return area; 2156 return area;
2161} 2157}
2162EXPORT_SYMBOL_GPL(alloc_vm_area); 2158EXPORT_SYMBOL_GPL(alloc_vm_area);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7e0116150dc7..faa0a088f9cc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -173,7 +173,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
173 struct scan_control *sc, enum lru_list lru) 173 struct scan_control *sc, enum lru_list lru)
174{ 174{
175 if (!scanning_global_lru(sc)) 175 if (!scanning_global_lru(sc))
176 return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru); 176 return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru);
177 177
178 return zone_page_state(zone, NR_LRU_BASE + lru); 178 return zone_page_state(zone, NR_LRU_BASE + lru);
179} 179}
@@ -1718,26 +1718,6 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1718} 1718}
1719 1719
1720/* 1720/*
1721 * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
1722 * until we collected @swap_cluster_max pages to scan.
1723 */
1724static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
1725 unsigned long *nr_saved_scan)
1726{
1727 unsigned long nr;
1728
1729 *nr_saved_scan += nr_to_scan;
1730 nr = *nr_saved_scan;
1731
1732 if (nr >= SWAP_CLUSTER_MAX)
1733 *nr_saved_scan = 0;
1734 else
1735 nr = 0;
1736
1737 return nr;
1738}
1739
1740/*
1741 * Determine how aggressively the anon and file LRU lists should be 1721 * Determine how aggressively the anon and file LRU lists should be
1742 * scanned. The relative value of each set of LRU lists is determined 1722 * scanned. The relative value of each set of LRU lists is determined
1743 * by looking at the fraction of the pages scanned we did rotate back 1723 * by looking at the fraction of the pages scanned we did rotate back
@@ -1755,6 +1735,22 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1755 u64 fraction[2], denominator; 1735 u64 fraction[2], denominator;
1756 enum lru_list l; 1736 enum lru_list l;
1757 int noswap = 0; 1737 int noswap = 0;
1738 int force_scan = 0;
1739
1740
1741 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1742 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1743 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1744 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1745
1746 if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
1747 /* kswapd does zone balancing and needs to scan this zone */
1748 if (scanning_global_lru(sc) && current_is_kswapd())
1749 force_scan = 1;
1750 /* memcg may have a small limit and needs to avoid priority drops */
1751 if (!scanning_global_lru(sc))
1752 force_scan = 1;
1753 }
1758 1754
1759 /* If we have no swap space, do not bother scanning anon pages. */ 1755 /* If we have no swap space, do not bother scanning anon pages. */
1760 if (!sc->may_swap || (nr_swap_pages <= 0)) { 1756 if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1765,11 +1761,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1765 goto out; 1761 goto out;
1766 } 1762 }
1767 1763
1768 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1769 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1770 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1771 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1772
1773 if (scanning_global_lru(sc)) { 1764 if (scanning_global_lru(sc)) {
1774 free = zone_page_state(zone, NR_FREE_PAGES); 1765 free = zone_page_state(zone, NR_FREE_PAGES);
1775 /* If we have very few page cache pages, 1766 /* If we have very few page cache pages,
@@ -1836,8 +1827,23 @@ out:
1836 scan >>= priority; 1827 scan >>= priority;
1837 scan = div64_u64(scan * fraction[file], denominator); 1828 scan = div64_u64(scan * fraction[file], denominator);
1838 } 1829 }
1839 nr[l] = nr_scan_try_batch(scan, 1830
1840 &reclaim_stat->nr_saved_scan[l]); 1831 /*
1832 * If the zone is small or the memcg is small, nr[l] can be 0.
1833 * That results in no scan at this priority and a priority drop.
1834 * Global direct reclaim can visit the next zone and tends
1835 * not to have problems. Global kswapd is doing zone
1836 * balancing and needs to scan a small amount. When using
1837 * memcg, a priority drop can cause large latency, so it's better
1838 * to scan a small amount. See the force_scan logic above.
1839 */
1840 if (!scan && force_scan) {
1841 if (file)
1842 scan = SWAP_CLUSTER_MAX;
1843 else if (!noswap)
1844 scan = SWAP_CLUSTER_MAX;
1845 }
1846 nr[l] = scan;
1841 } 1847 }
1842} 1848}
1843 1849
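The force_scan path above exists because scan targets are right-shifted by priority, so a small memcg can round to zero at every priority and force a priority drop. A simplified numeric demonstration, ignoring the anon/file fraction weighting that the real get_scan_count() also applies:

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

/* Pages to scan from one LRU list at a given priority (simplified). */
static unsigned long scan_target(unsigned long lru_pages, int priority,
				 int force_scan)
{
	unsigned long scan = lru_pages >> priority;

	if (!scan && force_scan)
		scan = SWAP_CLUSTER_MAX;
	return scan;
}

int main(void)
{
	/* a small memcg with 300 file pages, reclaim starting at priority 12 */
	for (int prio = 12; prio >= 8; prio--)
		printf("priority %2d: plain=%lu forced=%lu\n", prio,
		       scan_target(300, prio, 0), scan_target(300, prio, 1));
	return 0;
}

Without forcing, nothing is scanned until priority 8; with forcing, the early passes scan SWAP_CLUSTER_MAX pages instead of dropping priority.
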
@@ -1977,11 +1983,14 @@ restart:
1977 * If a zone is deemed to be full of pinned pages then just give it a light 1983 * If a zone is deemed to be full of pinned pages then just give it a light
1978 * scan then give up on it. 1984 * scan then give up on it.
1979 */ 1985 */
1980static void shrink_zones(int priority, struct zonelist *zonelist, 1986static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
1981 struct scan_control *sc) 1987 struct scan_control *sc)
1982{ 1988{
1983 struct zoneref *z; 1989 struct zoneref *z;
1984 struct zone *zone; 1990 struct zone *zone;
1991 unsigned long nr_soft_reclaimed;
1992 unsigned long nr_soft_scanned;
1993 unsigned long total_scanned = 0;
1985 1994
1986 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1995 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1987 gfp_zone(sc->gfp_mask), sc->nodemask) { 1996 gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1998,8 +2007,17 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
1998 continue; /* Let kswapd poll it */ 2007 continue; /* Let kswapd poll it */
1999 } 2008 }
2000 2009
2010 nr_soft_scanned = 0;
2011 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2012 sc->order, sc->gfp_mask,
2013 &nr_soft_scanned);
2014 sc->nr_reclaimed += nr_soft_reclaimed;
2015 total_scanned += nr_soft_scanned;
2016
2001 shrink_zone(priority, zone, sc); 2017 shrink_zone(priority, zone, sc);
2002 } 2018 }
2019
2020 return total_scanned;
2003} 2021}
2004 2022
2005static bool zone_reclaimable(struct zone *zone) 2023static bool zone_reclaimable(struct zone *zone)
@@ -2064,7 +2082,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2064 sc->nr_scanned = 0; 2082 sc->nr_scanned = 0;
2065 if (!priority) 2083 if (!priority)
2066 disable_swap_token(); 2084 disable_swap_token();
2067 shrink_zones(priority, zonelist, sc); 2085 total_scanned += shrink_zones(priority, zonelist, sc);
2068 /* 2086 /*
2069 * Don't shrink slabs when reclaiming memory from 2087 * Don't shrink slabs when reclaiming memory from
2070 * over limit cgroups 2088 * over limit cgroups
@@ -2171,9 +2189,11 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2171unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 2189unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2172 gfp_t gfp_mask, bool noswap, 2190 gfp_t gfp_mask, bool noswap,
2173 unsigned int swappiness, 2191 unsigned int swappiness,
2174 struct zone *zone) 2192 struct zone *zone,
2193 unsigned long *nr_scanned)
2175{ 2194{
2176 struct scan_control sc = { 2195 struct scan_control sc = {
2196 .nr_scanned = 0,
2177 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2197 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2178 .may_writepage = !laptop_mode, 2198 .may_writepage = !laptop_mode,
2179 .may_unmap = 1, 2199 .may_unmap = 1,
@@ -2182,6 +2202,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2182 .order = 0, 2202 .order = 0,
2183 .mem_cgroup = mem, 2203 .mem_cgroup = mem,
2184 }; 2204 };
2205
2185 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2206 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2186 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2207 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2187 2208
@@ -2200,6 +2221,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2200 2221
2201 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2222 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2202 2223
2224 *nr_scanned = sc.nr_scanned;
2203 return sc.nr_reclaimed; 2225 return sc.nr_reclaimed;
2204} 2226}
2205 2227
@@ -2210,6 +2232,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2210{ 2232{
2211 struct zonelist *zonelist; 2233 struct zonelist *zonelist;
2212 unsigned long nr_reclaimed; 2234 unsigned long nr_reclaimed;
2235 int nid;
2213 struct scan_control sc = { 2236 struct scan_control sc = {
2214 .may_writepage = !laptop_mode, 2237 .may_writepage = !laptop_mode,
2215 .may_unmap = 1, 2238 .may_unmap = 1,
@@ -2226,7 +2249,14 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2226 .gfp_mask = sc.gfp_mask, 2249 .gfp_mask = sc.gfp_mask,
2227 }; 2250 };
2228 2251
2229 zonelist = NODE_DATA(numa_node_id())->node_zonelists; 2252 /*
2253 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2254 * care which node its pages come from. So the node where we start the
2255 * scan does not need to be the current node.
2256 */
2257 nid = mem_cgroup_select_victim_node(mem_cont);
2258
2259 zonelist = NODE_DATA(nid)->node_zonelists;
2230 2260
2231 trace_mm_vmscan_memcg_reclaim_begin(0, 2261 trace_mm_vmscan_memcg_reclaim_begin(0,
2232 sc.may_writepage, 2262 sc.may_writepage,
@@ -2347,6 +2377,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2347 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2377 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2348 unsigned long total_scanned; 2378 unsigned long total_scanned;
2349 struct reclaim_state *reclaim_state = current->reclaim_state; 2379 struct reclaim_state *reclaim_state = current->reclaim_state;
2380 unsigned long nr_soft_reclaimed;
2381 unsigned long nr_soft_scanned;
2350 struct scan_control sc = { 2382 struct scan_control sc = {
2351 .gfp_mask = GFP_KERNEL, 2383 .gfp_mask = GFP_KERNEL,
2352 .may_unmap = 1, 2384 .may_unmap = 1,
@@ -2439,11 +2471,15 @@ loop_again:
2439 2471
2440 sc.nr_scanned = 0; 2472 sc.nr_scanned = 0;
2441 2473
2474 nr_soft_scanned = 0;
2442 /* 2475 /*
2443 * Call soft limit reclaim before calling shrink_zone. 2476 * Call soft limit reclaim before calling shrink_zone.
2444 * For now we ignore the return value
2445 */ 2477 */
2446 mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask); 2478 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2479 order, sc.gfp_mask,
2480 &nr_soft_scanned);
2481 sc.nr_reclaimed += nr_soft_reclaimed;
2482 total_scanned += nr_soft_scanned;
2447 2483
2448 /* 2484 /*
2449 * We put equal pressure on every zone, unless 2485 * We put equal pressure on every zone, unless
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index b2274d1fd605..c7a581a96894 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -46,8 +46,6 @@ int vlan_net_id __read_mostly;
46 46
47const char vlan_fullname[] = "802.1Q VLAN Support"; 47const char vlan_fullname[] = "802.1Q VLAN Support";
48const char vlan_version[] = DRV_VERSION; 48const char vlan_version[] = DRV_VERSION;
49static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
50static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
51 49
52/* End of global variables definitions. */ 50/* End of global variables definitions. */
53 51
@@ -673,8 +671,7 @@ static int __init vlan_proto_init(void)
673{ 671{
674 int err; 672 int err;
675 673
676 pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright); 674 pr_info("%s v%s\n", vlan_fullname, vlan_version);
677 pr_info("All bugs added by %s\n", vlan_buggyright);
678 675
679 err = register_pernet_subsys(&vlan_net_ops); 676 err = register_pernet_subsys(&vlan_net_ops);
680 if (err < 0) 677 if (err < 0)
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index f7fa67c78766..f49da5814bc3 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -59,6 +59,14 @@ static ssize_t show_atmaddress(struct device *cdev,
59 return pos - buf; 59 return pos - buf;
60} 60}
61 61
62static ssize_t show_atmindex(struct device *cdev,
63 struct device_attribute *attr, char *buf)
64{
65 struct atm_dev *adev = to_atm_dev(cdev);
66
67 return sprintf(buf, "%d\n", adev->number);
68}
69
62static ssize_t show_carrier(struct device *cdev, 70static ssize_t show_carrier(struct device *cdev,
63 struct device_attribute *attr, char *buf) 71 struct device_attribute *attr, char *buf)
64{ 72{
@@ -99,6 +107,7 @@ static ssize_t show_link_rate(struct device *cdev,
99 107
100static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); 108static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
101static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL); 109static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
110static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
102static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL); 111static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
103static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); 112static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
104static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL); 113static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
@@ -106,6 +115,7 @@ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
106static struct device_attribute *atm_attrs[] = { 115static struct device_attribute *atm_attrs[] = {
107 &dev_attr_atmaddress, 116 &dev_attr_atmaddress,
108 &dev_attr_address, 117 &dev_attr_address,
118 &dev_attr_atmindex,
109 &dev_attr_carrier, 119 &dev_attr_carrier,
110 &dev_attr_type, 120 &dev_attr_type,
111 &dev_attr_link_rate, 121 &dev_attr_link_rate,
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 25073b6ef474..ba48daa68c1f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1171,7 +1171,7 @@ static int __init lane_module_init(void)
1171#endif 1171#endif
1172 1172
1173 register_atm_ioctl(&lane_ioctl_ops); 1173 register_atm_ioctl(&lane_ioctl_ops);
1174 pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n"); 1174 pr_info("lec.c: initialized\n");
1175 return 0; 1175 return 0;
1176} 1176}
1177 1177
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 644cdf071642..3ccca42e6f90 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1482,7 +1482,7 @@ static __init int atm_mpoa_init(void)
1482 if (mpc_proc_init() != 0) 1482 if (mpc_proc_init() != 0)
1483 pr_info("failed to initialize /proc/mpoa\n"); 1483 pr_info("failed to initialize /proc/mpoa\n");
1484 1484
1485 pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n"); 1485 pr_info("mpc.c: initialized\n");
1486 1486
1487 return 0; 1487 return 0;
1488} 1488}
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 1a92b369c820..2b5ca1a0054d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1883,14 +1883,13 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1883 struct xt_target *wt; 1883 struct xt_target *wt;
1884 void *dst = NULL; 1884 void *dst = NULL;
1885 int off, pad = 0; 1885 int off, pad = 0;
1886 unsigned int size_kern, entry_offset, match_size = mwt->match_size; 1886 unsigned int size_kern, match_size = mwt->match_size;
1887 1887
1888 strlcpy(name, mwt->u.name, sizeof(name)); 1888 strlcpy(name, mwt->u.name, sizeof(name));
1889 1889
1890 if (state->buf_kern_start) 1890 if (state->buf_kern_start)
1891 dst = state->buf_kern_start + state->buf_kern_offset; 1891 dst = state->buf_kern_start + state->buf_kern_offset;
1892 1892
1893 entry_offset = (unsigned char *) mwt - base;
1894 switch (compat_mwt) { 1893 switch (compat_mwt) {
1895 case EBT_COMPAT_MATCH: 1894 case EBT_COMPAT_MATCH:
1896 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE, 1895 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
@@ -1933,6 +1932,9 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1933 size_kern = wt->targetsize; 1932 size_kern = wt->targetsize;
1934 module_put(wt->me); 1933 module_put(wt->me);
1935 break; 1934 break;
1935
1936 default:
1937 return -EINVAL;
1936 } 1938 }
1937 1939
1938 state->buf_kern_offset += match_size + off; 1940 state->buf_kern_offset += match_size + off;
diff --git a/net/can/proc.c b/net/can/proc.c
index f4265cc9c3fb..0016f7339699 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -204,12 +204,11 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
204 204
205 hlist_for_each_entry_rcu(r, n, rx_list, list) { 205 hlist_for_each_entry_rcu(r, n, rx_list, list) {
206 char *fmt = (r->can_id & CAN_EFF_FLAG)? 206 char *fmt = (r->can_id & CAN_EFF_FLAG)?
207 " %-5s %08X %08x %08x %08x %8ld %s\n" : 207 " %-5s %08x %08x %pK %pK %8ld %s\n" :
208 " %-5s %03X %08x %08lx %08lx %8ld %s\n"; 208 " %-5s %03x %08x %pK %pK %8ld %s\n";
209 209
210 seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask, 210 seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask,
211 (unsigned long)r->func, (unsigned long)r->data, 211 r->func, r->data, r->matches, r->ident);
212 r->matches, r->ident);
213 } 212 }
214} 213}
215 214
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 84e7304532e6..fd14116ad7f0 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -233,6 +233,29 @@ static int ethtool_set_feature_compat(struct net_device *dev,
233 return 1; 233 return 1;
234} 234}
235 235
236static int ethtool_set_flags_compat(struct net_device *dev,
237 int (*legacy_set)(struct net_device *, u32),
238 struct ethtool_set_features_block *features, u32 mask)
239{
240 u32 value;
241
242 if (!legacy_set)
243 return 0;
244
245 if (!(features[0].valid & mask))
246 return 0;
247
248 value = dev->features & ~features[0].valid;
249 value |= features[0].requested;
250
251 features[0].valid &= ~mask;
252
253 if (legacy_set(dev, value & mask) < 0)
254 netdev_info(dev, "Legacy flags change failed\n");
255
256 return 1;
257}
258
236static int ethtool_set_features_compat(struct net_device *dev, 259static int ethtool_set_features_compat(struct net_device *dev,
237 struct ethtool_set_features_block *features) 260 struct ethtool_set_features_block *features)
238{ 261{
@@ -249,7 +272,7 @@ static int ethtool_set_features_compat(struct net_device *dev,
249 features, NETIF_F_ALL_TSO); 272 features, NETIF_F_ALL_TSO);
250 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum, 273 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
251 features, NETIF_F_RXCSUM); 274 features, NETIF_F_RXCSUM);
252 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags, 275 compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
253 features, flags_dup_features); 276 features, flags_dup_features);
254 277
255 return compat; 278 return compat;
diff --git a/net/core/filter.c b/net/core/filter.c
index 0e3622f1dcb1..36f975fa87cb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -38,6 +38,7 @@
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <linux/filter.h> 39#include <linux/filter.h>
40#include <linux/reciprocal_div.h> 40#include <linux/reciprocal_div.h>
41#include <linux/ratelimit.h>
41 42
42/* No hurry in this branch */ 43/* No hurry in this branch */
43static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) 44static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index a829e3f60aeb..77a65f031488 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -17,6 +17,7 @@
17 17
18#include <net/ip.h> 18#include <net/ip.h>
19#include <net/sock.h> 19#include <net/sock.h>
20#include <net/net_ratelimit.h>
20 21
21#ifdef CONFIG_RPS 22#ifdef CONFIG_RPS
22static int rps_sock_flow_sysctl(ctl_table *table, int write, 23static int rps_sock_flow_sysctl(ctl_table *table, int write,
diff --git a/net/core/utils.c b/net/core/utils.c
index 2012bc797f9c..386e263f6066 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -27,6 +27,7 @@
27#include <linux/ratelimit.h> 27#include <linux/ratelimit.h>
28 28
29#include <net/sock.h> 29#include <net/sock.h>
30#include <net/net_ratelimit.h>
30 31
31#include <asm/byteorder.h> 32#include <asm/byteorder.h>
32#include <asm/system.h> 33#include <asm/system.h>
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9df4e635fb5f..ce616d92cc54 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -154,11 +154,9 @@ void __init inet_initpeers(void)
154/* Called with or without local BH being disabled. */ 154/* Called with or without local BH being disabled. */
155static void unlink_from_unused(struct inet_peer *p) 155static void unlink_from_unused(struct inet_peer *p)
156{ 156{
157 if (!list_empty(&p->unused)) { 157 spin_lock_bh(&unused_peers.lock);
158 spin_lock_bh(&unused_peers.lock); 158 list_del_init(&p->unused);
159 list_del_init(&p->unused); 159 spin_unlock_bh(&unused_peers.lock);
160 spin_unlock_bh(&unused_peers.lock);
161 }
162} 160}
163 161
164static int addr_compare(const struct inetpeer_addr *a, 162static int addr_compare(const struct inetpeer_addr *a,
@@ -205,6 +203,20 @@ static int addr_compare(const struct inetpeer_addr *a,
205 u; \ 203 u; \
206}) 204})
207 205
206static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
207{
208 int cur, old = atomic_read(ptr);
209
210 while (old != u) {
211 *newv = old + a;
212 cur = atomic_cmpxchg(ptr, old, *newv);
213 if (cur == old)
214 return true;
215 old = cur;
216 }
217 return false;
218}
219
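atomic_add_unless_return() above is the usual cmpxchg retry loop, extended so the caller also learns the post-increment value; inet_getpeer() uses that value (newrefcnt == 1) to decide whether the entry must be pulled off the unused list. The same loop expressed with C11 atomics, as a hedged userspace sketch:

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Add 'a' to *ptr unless it equals 'u'; on success report the new
 * value through *newv and return true, otherwise return false.
 */
bool add_unless_return(atomic_int *ptr, int a, int u, int *newv)
{
	int old = atomic_load(ptr);

	while (old != u) {
		*newv = old + a;
		/* on failure, 'old' is reloaded with the current value */
		if (atomic_compare_exchange_weak(ptr, &old, *newv))
			return true;
	}
	return false;
}
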
208/* 220/*
209 * Called with rcu_read_lock() 221 * Called with rcu_read_lock()
210 * Because we hold no lock against a writer, its quite possible we fall 222 * Because we hold no lock against a writer, its quite possible we fall
@@ -213,7 +225,8 @@ static int addr_compare(const struct inetpeer_addr *a,
213 * We exit from this function if number of links exceeds PEER_MAXDEPTH 225 * We exit from this function if number of links exceeds PEER_MAXDEPTH
214 */ 226 */
215static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr, 227static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
216 struct inet_peer_base *base) 228 struct inet_peer_base *base,
229 int *newrefcnt)
217{ 230{
218 struct inet_peer *u = rcu_dereference(base->root); 231 struct inet_peer *u = rcu_dereference(base->root);
219 int count = 0; 232 int count = 0;
@@ -226,7 +239,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
226 * distinction between an unused entry (refcnt=0) and 239 * distinction between an unused entry (refcnt=0) and
227 * a freed one. 240 * a freed one.
228 */ 241 */
229 if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1))) 242 if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
230 u = NULL; 243 u = NULL;
231 return u; 244 return u;
232 } 245 }
@@ -465,22 +478,23 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
465 struct inet_peer_base *base = family_to_base(daddr->family); 478 struct inet_peer_base *base = family_to_base(daddr->family);
466 struct inet_peer *p; 479 struct inet_peer *p;
467 unsigned int sequence; 480 unsigned int sequence;
468 int invalidated; 481 int invalidated, newrefcnt = 0;
469 482
470 /* Look up for the address quickly, lockless. 483 /* Look up for the address quickly, lockless.
471 * Because of a concurrent writer, we might not find an existing entry. 484 * Because of a concurrent writer, we might not find an existing entry.
472 */ 485 */
473 rcu_read_lock(); 486 rcu_read_lock();
474 sequence = read_seqbegin(&base->lock); 487 sequence = read_seqbegin(&base->lock);
475 p = lookup_rcu(daddr, base); 488 p = lookup_rcu(daddr, base, &newrefcnt);
476 invalidated = read_seqretry(&base->lock, sequence); 489 invalidated = read_seqretry(&base->lock, sequence);
477 rcu_read_unlock(); 490 rcu_read_unlock();
478 491
479 if (p) { 492 if (p) {
480 /* The existing node has been found. 493found: /* The existing node has been found.
481 * Remove the entry from unused list if it was there. 494 * Remove the entry from unused list if it was there.
482 */ 495 */
483 unlink_from_unused(p); 496 if (newrefcnt == 1)
497 unlink_from_unused(p);
484 return p; 498 return p;
485 } 499 }
486 500
@@ -494,11 +508,9 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
494 write_seqlock_bh(&base->lock); 508 write_seqlock_bh(&base->lock);
495 p = lookup(daddr, stack, base); 509 p = lookup(daddr, stack, base);
496 if (p != peer_avl_empty) { 510 if (p != peer_avl_empty) {
497 atomic_inc(&p->refcnt); 511 newrefcnt = atomic_inc_return(&p->refcnt);
498 write_sequnlock_bh(&base->lock); 512 write_sequnlock_bh(&base->lock);
499 /* Remove the entry from unused list if it was there. */ 513 goto found;
500 unlink_from_unused(p);
501 return p;
502 } 514 }
503 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL; 515 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
504 if (p) { 516 if (p) {
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a15c01524959..7f9124914b13 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -54,7 +54,7 @@
54#include <asm/atomic.h> 54#include <asm/atomic.h>
55#include <asm/ebcdic.h> 55#include <asm/ebcdic.h>
56#include <asm/io.h> 56#include <asm/io.h>
57#include <asm/s390_ext.h> 57#include <asm/irq.h>
58#include <asm/smp.h> 58#include <asm/smp.h>
59 59
60/* 60/*
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 72d1ac611fdc..8041befc6555 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -815,7 +815,7 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
815 ip_set_id_t i; 815 ip_set_id_t i;
816 816
817 if (unlikely(protocol_failed(attr))) 817 if (unlikely(protocol_failed(attr)))
818 return -EPROTO; 818 return -IPSET_ERR_PROTOCOL;
819 819
820 if (!attr[IPSET_ATTR_SETNAME]) { 820 if (!attr[IPSET_ATTR_SETNAME]) {
821 for (i = 0; i < ip_set_max; i++) 821 for (i = 0; i < ip_set_max; i++)
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 6b5dd6ddaae9..af63553fa332 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -411,25 +411,35 @@ static struct ip_vs_app ip_vs_ftp = {
411static int __net_init __ip_vs_ftp_init(struct net *net) 411static int __net_init __ip_vs_ftp_init(struct net *net)
412{ 412{
413 int i, ret; 413 int i, ret;
414 struct ip_vs_app *app = &ip_vs_ftp; 414 struct ip_vs_app *app;
415 struct netns_ipvs *ipvs = net_ipvs(net);
416
417 app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
418 if (!app)
419 return -ENOMEM;
420 INIT_LIST_HEAD(&app->a_list);
421 INIT_LIST_HEAD(&app->incs_list);
422 ipvs->ftp_app = app;
415 423
416 ret = register_ip_vs_app(net, app); 424 ret = register_ip_vs_app(net, app);
417 if (ret) 425 if (ret)
418 return ret; 426 goto err_exit;
419 427
420 for (i=0; i<IP_VS_APP_MAX_PORTS; i++) { 428 for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
421 if (!ports[i]) 429 if (!ports[i])
422 continue; 430 continue;
423 ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]); 431 ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
424 if (ret) 432 if (ret)
425 break; 433 goto err_unreg;
426 pr_info("%s: loaded support on port[%d] = %d\n", 434 pr_info("%s: loaded support on port[%d] = %d\n",
427 app->name, i, ports[i]); 435 app->name, i, ports[i]);
428 } 436 }
437 return 0;
429 438
430 if (ret) 439err_unreg:
431 unregister_ip_vs_app(net, app); 440 unregister_ip_vs_app(net, app);
432 441err_exit:
442 kfree(ipvs->ftp_app);
433 return ret; 443 return ret;
434} 444}
435/* 445/*
@@ -437,9 +447,10 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
437 */ 447 */
438static void __ip_vs_ftp_exit(struct net *net) 448static void __ip_vs_ftp_exit(struct net *net)
439{ 449{
440 struct ip_vs_app *app = &ip_vs_ftp; 450 struct netns_ipvs *ipvs = net_ipvs(net);
441 451
442 unregister_ip_vs_app(net, app); 452 unregister_ip_vs_app(net, ipvs->ftp_app);
453 kfree(ipvs->ftp_app);
443} 454}
444 455
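The ip_vs_ftp change gives every network namespace its own kmemdup()'d copy of the static application template, so per-netns list heads and counters no longer alias one shared object. A userspace sketch of that duplicate-a-template pattern (struct layout and names are illustrative only):

#include <stdlib.h>
#include <string.h>

struct app_template {
	const char *name;
	int protocol;
	/* per-instance list heads, counters, ... */
};

static const struct app_template ftp_template = { "ftp", 6 /* TCP */ };

/* Give each namespace its own mutable copy of the shared template. */
struct app_template *app_instance_create(void)
{
	struct app_template *app = malloc(sizeof(*app));

	if (!app)
		return NULL;
	memcpy(app, &ftp_template, sizeof(*app));
	return app;
}
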
445static struct pernet_operations ip_vs_ftp_ops = { 456static struct pernet_operations ip_vs_ftp_ops = {
diff --git a/scripts/selinux/README b/scripts/selinux/README
index a936315ba2c8..4d020ecb7524 100644
--- a/scripts/selinux/README
+++ b/scripts/selinux/README
@@ -1,2 +1,2 @@
1Please see Documentation/SELinux.txt for information on 1Please see Documentation/security/SELinux.txt for information on
2installing a dummy SELinux policy. 2installing a dummy SELinux policy.
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 06d764ccbbe5..94de6b4907c8 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -194,7 +194,7 @@ void aa_dfa_free_kref(struct kref *kref)
194 * @flags: flags controlling what type of accept tables are acceptable 194 * @flags: flags controlling what type of accept tables are acceptable
195 * 195 *
196 * Unpack a dfa that has been serialized. To find information on the dfa 196 * Unpack a dfa that has been serialized. To find information on the dfa
197 * format look in Documentation/apparmor.txt 197 * format look in Documentation/security/apparmor.txt
198 * Assumes the dfa @blob stream has been aligned on a 8 byte boundary 198 * Assumes the dfa @blob stream has been aligned on a 8 byte boundary
199 * 199 *
200 * Returns: an unpacked dfa ready for matching or ERR_PTR on failure 200 * Returns: an unpacked dfa ready for matching or ERR_PTR on failure
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index e33aaf7e5744..d6d9a57b5652 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -12,8 +12,8 @@
12 * published by the Free Software Foundation, version 2 of the 12 * published by the Free Software Foundation, version 2 of the
13 * License. 13 * License.
14 * 14 *
15 * AppArmor uses a serialized binary format for loading policy. 15 * AppArmor uses a serialized binary format for loading policy. To find
16 * To find policy format documentation look in Documentation/apparmor.txt 16 * policy format documentation look in Documentation/security/apparmor.txt
17 * All policy is validated before it is used. 17 * All policy is validated before it is used.
18 */ 18 */
19 19
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 8d9c48f13774..cd1f779fa51d 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -62,8 +62,7 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
62struct cgroup_subsys devices_subsys; 62struct cgroup_subsys devices_subsys;
63 63
64static int devcgroup_can_attach(struct cgroup_subsys *ss, 64static int devcgroup_can_attach(struct cgroup_subsys *ss,
65 struct cgroup *new_cgroup, struct task_struct *task, 65 struct cgroup *new_cgroup, struct task_struct *task)
66 bool threadgroup)
67{ 66{
68 if (current != task && !capable(CAP_SYS_ADMIN)) 67 if (current != task && !capable(CAP_SYS_ADMIN))
69 return -EPERM; 68 return -EPERM;
diff --git a/security/keys/encrypted.c b/security/keys/encrypted.c
index 69907a58a683..b1cba5bf0a5e 100644
--- a/security/keys/encrypted.c
+++ b/security/keys/encrypted.c
@@ -8,7 +8,7 @@
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, version 2 of the License. 9 * the Free Software Foundation, version 2 of the License.
10 * 10 *
11 * See Documentation/keys-trusted-encrypted.txt 11 * See Documentation/security/keys-trusted-encrypted.txt
12 */ 12 */
13 13
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 6c0480db8885..a3063eb3dc23 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -847,6 +847,7 @@ void key_replace_session_keyring(void)
847 new-> sgid = old-> sgid; 847 new-> sgid = old-> sgid;
848 new->fsgid = old->fsgid; 848 new->fsgid = old->fsgid;
849 new->user = get_uid(old->user); 849 new->user = get_uid(old->user);
850 new->user_ns = new->user->user_ns;
850 new->group_info = get_group_info(old->group_info); 851 new->group_info = get_group_info(old->group_info);
851 852
852 new->securebits = old->securebits; 853 new->securebits = old->securebits;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index b18a71745901..d31862e0aa1c 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -8,7 +8,7 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * See Documentation/keys-request-key.txt 11 * See Documentation/security/keys-request-key.txt
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index f6337c9082eb..6cff37529b80 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -8,7 +8,7 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * See Documentation/keys-request-key.txt 11 * See Documentation/security/keys-request-key.txt
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index c99b9368368c..0c33e2ea1f3c 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -8,7 +8,7 @@
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, version 2 of the License. 9 * the Free Software Foundation, version 2 of the License.
10 * 10 *
11 * See Documentation/keys-trusted-encrypted.txt 11 * See Documentation/security/keys-trusted-encrypted.txt
12 */ 12 */
13 13
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index fcb89cb0f223..d515b2128a4e 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -752,10 +752,9 @@ int avc_ss_reset(u32 seqno)
752int avc_has_perm_noaudit(u32 ssid, u32 tsid, 752int avc_has_perm_noaudit(u32 ssid, u32 tsid,
753 u16 tclass, u32 requested, 753 u16 tclass, u32 requested,
754 unsigned flags, 754 unsigned flags,
755 struct av_decision *in_avd) 755 struct av_decision *avd)
756{ 756{
757 struct avc_node *node; 757 struct avc_node *node;
758 struct av_decision avd_entry, *avd;
759 int rc = 0; 758 int rc = 0;
760 u32 denied; 759 u32 denied;
761 760
@@ -766,18 +765,11 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
766 node = avc_lookup(ssid, tsid, tclass); 765 node = avc_lookup(ssid, tsid, tclass);
767 if (unlikely(!node)) { 766 if (unlikely(!node)) {
768 rcu_read_unlock(); 767 rcu_read_unlock();
769
770 if (in_avd)
771 avd = in_avd;
772 else
773 avd = &avd_entry;
774
775 security_compute_av(ssid, tsid, tclass, avd); 768 security_compute_av(ssid, tsid, tclass, avd);
776 rcu_read_lock(); 769 rcu_read_lock();
777 node = avc_insert(ssid, tsid, tclass, avd); 770 node = avc_insert(ssid, tsid, tclass, avd);
778 } else { 771 } else {
779 if (in_avd) 772 memcpy(avd, &node->ae.avd, sizeof(*avd));
780 memcpy(in_avd, &node->ae.avd, sizeof(*in_avd));
781 avd = &node->ae.avd; 773 avd = &node->ae.avd;
782 } 774 }
783 775
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index c3e4b52699f4..973e00e34fa9 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2217,10 +2217,11 @@ out_unlock:
2217 goto out; 2217 goto out;
2218 } 2218 }
2219 for (i = 0, j = 0; i < mynel; i++) { 2219 for (i = 0, j = 0; i < mynel; i++) {
2220 struct av_decision dummy_avd;
2220 rc = avc_has_perm_noaudit(fromsid, mysids[i], 2221 rc = avc_has_perm_noaudit(fromsid, mysids[i],
2221 SECCLASS_PROCESS, /* kernel value */ 2222 SECCLASS_PROCESS, /* kernel value */
2222 PROCESS__TRANSITION, AVC_STRICT, 2223 PROCESS__TRANSITION, AVC_STRICT,
2223 NULL); 2224 &dummy_avd);
2224 if (!rc) 2225 if (!rc)
2225 mysids2[j++] = mysids[i]; 2226 mysids2[j++] = mysids[i];
2226 cond_resched(); 2227 cond_resched();
diff --git a/sound/core/control.c b/sound/core/control.c
index 5d98194bcad5..f8c5be464510 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -704,13 +704,12 @@ static int snd_ctl_elem_list(struct snd_card *card,
704 struct snd_ctl_elem_list list; 704 struct snd_ctl_elem_list list;
705 struct snd_kcontrol *kctl; 705 struct snd_kcontrol *kctl;
706 struct snd_ctl_elem_id *dst, *id; 706 struct snd_ctl_elem_id *dst, *id;
707 unsigned int offset, space, first, jidx; 707 unsigned int offset, space, jidx;
708 708
709 if (copy_from_user(&list, _list, sizeof(list))) 709 if (copy_from_user(&list, _list, sizeof(list)))
710 return -EFAULT; 710 return -EFAULT;
711 offset = list.offset; 711 offset = list.offset;
712 space = list.space; 712 space = list.space;
713 first = 0;
714 /* try limit maximum space */ 713 /* try limit maximum space */
715 if (space > 16384) 714 if (space > 16384)
716 return -ENOMEM; 715 return -ENOMEM;
diff --git a/sound/core/init.c b/sound/core/init.c
index 30ecad41403c..2c041bb36ab3 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -342,7 +342,6 @@ static const struct file_operations snd_shutdown_f_ops =
342int snd_card_disconnect(struct snd_card *card) 342int snd_card_disconnect(struct snd_card *card)
343{ 343{
344 struct snd_monitor_file *mfile; 344 struct snd_monitor_file *mfile;
345 struct file *file;
346 int err; 345 int err;
347 346
348 if (!card) 347 if (!card)
@@ -366,8 +365,6 @@ int snd_card_disconnect(struct snd_card *card)
366 365
367 spin_lock(&card->files_lock); 366 spin_lock(&card->files_lock);
368 list_for_each_entry(mfile, &card->files_list, list) { 367 list_for_each_entry(mfile, &card->files_list, list) {
369 file = mfile->file;
370
371 /* it's critical part, use endless loop */ 368 /* it's critical part, use endless loop */
372 /* we have no room to fail */ 369 /* we have no room to fail */
373 mfile->disconnected_f_op = mfile->file->f_op; 370 mfile->disconnected_f_op = mfile->file->f_op;
diff --git a/sound/core/oss/linear.c b/sound/core/oss/linear.c
index 13b3f6f49fae..2045697f449d 100644
--- a/sound/core/oss/linear.c
+++ b/sound/core/oss/linear.c
@@ -90,11 +90,8 @@ static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin,
90 struct snd_pcm_plugin_channel *dst_channels, 90 struct snd_pcm_plugin_channel *dst_channels,
91 snd_pcm_uframes_t frames) 91 snd_pcm_uframes_t frames)
92{ 92{
93 struct linear_priv *data;
94
95 if (snd_BUG_ON(!plugin || !src_channels || !dst_channels)) 93 if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
96 return -ENXIO; 94 return -ENXIO;
97 data = (struct linear_priv *)plugin->extra_data;
98 if (frames == 0) 95 if (frames == 0)
99 return 0; 96 return 0;
100#ifdef CONFIG_SND_DEBUG 97#ifdef CONFIG_SND_DEBUG
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index abfeff1611ce..f1341308beda 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1756,8 +1756,18 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
1756 wait_queue_t wait; 1756 wait_queue_t wait;
1757 int err = 0; 1757 int err = 0;
1758 snd_pcm_uframes_t avail = 0; 1758 snd_pcm_uframes_t avail = 0;
1759 long tout; 1759 long wait_time, tout;
1760 1760
1761 if (runtime->no_period_wakeup)
1762 wait_time = MAX_SCHEDULE_TIMEOUT;
1763 else {
1764 wait_time = 10;
1765 if (runtime->rate) {
1766 long t = runtime->period_size * 2 / runtime->rate;
1767 wait_time = max(t, wait_time);
1768 }
1769 wait_time = msecs_to_jiffies(wait_time * 1000);
1770 }
1761 init_waitqueue_entry(&wait, current); 1771 init_waitqueue_entry(&wait, current);
1762 add_wait_queue(&runtime->tsleep, &wait); 1772 add_wait_queue(&runtime->tsleep, &wait);
1763 for (;;) { 1773 for (;;) {
@@ -1765,9 +1775,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
1765 err = -ERESTARTSYS; 1775 err = -ERESTARTSYS;
1766 break; 1776 break;
1767 } 1777 }
1768 set_current_state(TASK_INTERRUPTIBLE);
1769 snd_pcm_stream_unlock_irq(substream); 1778 snd_pcm_stream_unlock_irq(substream);
1770 tout = schedule_timeout(msecs_to_jiffies(10000)); 1779 tout = schedule_timeout_interruptible(wait_time);
1771 snd_pcm_stream_lock_irq(substream); 1780 snd_pcm_stream_lock_irq(substream);
1772 switch (runtime->status->state) { 1781 switch (runtime->status->state) {
1773 case SNDRV_PCM_STATE_SUSPENDED: 1782 case SNDRV_PCM_STATE_SUSPENDED:
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 1a07750f3836..1c6be91dfb98 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1481,11 +1481,20 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
1481 break; /* all drained */ 1481 break; /* all drained */
1482 init_waitqueue_entry(&wait, current); 1482 init_waitqueue_entry(&wait, current);
1483 add_wait_queue(&to_check->sleep, &wait); 1483 add_wait_queue(&to_check->sleep, &wait);
1484 set_current_state(TASK_INTERRUPTIBLE);
1485 snd_pcm_stream_unlock_irq(substream); 1484 snd_pcm_stream_unlock_irq(substream);
1486 up_read(&snd_pcm_link_rwsem); 1485 up_read(&snd_pcm_link_rwsem);
1487 snd_power_unlock(card); 1486 snd_power_unlock(card);
1488 tout = schedule_timeout(10 * HZ); 1487 if (runtime->no_period_wakeup)
1488 tout = MAX_SCHEDULE_TIMEOUT;
1489 else {
1490 tout = 10;
1491 if (runtime->rate) {
1492 long t = runtime->period_size * 2 / runtime->rate;
1493 tout = max(t, tout);
1494 }
1495 tout = msecs_to_jiffies(tout * 1000);
1496 }
1497 tout = schedule_timeout_interruptible(tout);
1489 snd_power_lock(card); 1498 snd_power_lock(card);
1490 down_read(&snd_pcm_link_rwsem); 1499 down_read(&snd_pcm_link_rwsem);
1491 snd_pcm_stream_lock_irq(substream); 1500 snd_pcm_stream_lock_irq(substream);
@@ -1518,13 +1527,11 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
1518static int snd_pcm_drop(struct snd_pcm_substream *substream) 1527static int snd_pcm_drop(struct snd_pcm_substream *substream)
1519{ 1528{
1520 struct snd_pcm_runtime *runtime; 1529 struct snd_pcm_runtime *runtime;
1521 struct snd_card *card;
1522 int result = 0; 1530 int result = 0;
1523 1531
1524 if (PCM_RUNTIME_CHECK(substream)) 1532 if (PCM_RUNTIME_CHECK(substream))
1525 return -ENXIO; 1533 return -ENXIO;
1526 runtime = substream->runtime; 1534 runtime = substream->runtime;
1527 card = substream->pcm->card;
1528 1535
1529 if (runtime->status->state == SNDRV_PCM_STATE_OPEN || 1536 if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1530 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED || 1537 runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED ||
@@ -2056,7 +2063,6 @@ static int snd_pcm_open_file(struct file *file,
2056{ 2063{
2057 struct snd_pcm_file *pcm_file; 2064 struct snd_pcm_file *pcm_file;
2058 struct snd_pcm_substream *substream; 2065 struct snd_pcm_substream *substream;
2059 struct snd_pcm_str *str;
2060 int err; 2066 int err;
2061 2067
2062 if (rpcm_file) 2068 if (rpcm_file)
@@ -2073,7 +2079,6 @@ static int snd_pcm_open_file(struct file *file,
2073 } 2079 }
2074 pcm_file->substream = substream; 2080 pcm_file->substream = substream;
2075 if (substream->ref_count == 1) { 2081 if (substream->ref_count == 1) {
2076 str = substream->pstr;
2077 substream->file = pcm_file; 2082 substream->file = pcm_file;
2078 substream->pcm_release = pcm_release_private; 2083 substream->pcm_release = pcm_release_private;
2079 } 2084 }
@@ -3015,11 +3020,9 @@ static const struct vm_operations_struct snd_pcm_vm_ops_status =
3015static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, 3020static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3016 struct vm_area_struct *area) 3021 struct vm_area_struct *area)
3017{ 3022{
3018 struct snd_pcm_runtime *runtime;
3019 long size; 3023 long size;
3020 if (!(area->vm_flags & VM_READ)) 3024 if (!(area->vm_flags & VM_READ))
3021 return -EINVAL; 3025 return -EINVAL;
3022 runtime = substream->runtime;
3023 size = area->vm_end - area->vm_start; 3026 size = area->vm_end - area->vm_start;
3024 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status))) 3027 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3025 return -EINVAL; 3028 return -EINVAL;
@@ -3054,11 +3057,9 @@ static const struct vm_operations_struct snd_pcm_vm_ops_control =
3054static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, 3057static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3055 struct vm_area_struct *area) 3058 struct vm_area_struct *area)
3056{ 3059{
3057 struct snd_pcm_runtime *runtime;
3058 long size; 3060 long size;
3059 if (!(area->vm_flags & VM_READ)) 3061 if (!(area->vm_flags & VM_READ))
3060 return -EINVAL; 3062 return -EINVAL;
3061 runtime = substream->runtime;
3062 size = area->vm_end - area->vm_start; 3063 size = area->vm_end - area->vm_start;
3063 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))) 3064 if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3064 return -EINVAL; 3065 return -EINVAL;
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index e7a8e9e4edb2..f9077361c119 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -467,13 +467,11 @@ int snd_seq_queue_timer_open(int queueid)
467int snd_seq_queue_timer_close(int queueid) 467int snd_seq_queue_timer_close(int queueid)
468{ 468{
469 struct snd_seq_queue *queue; 469 struct snd_seq_queue *queue;
470 struct snd_seq_timer *tmr;
471 int result = 0; 470 int result = 0;
472 471
473 queue = queueptr(queueid); 472 queue = queueptr(queueid);
474 if (queue == NULL) 473 if (queue == NULL)
475 return -EINVAL; 474 return -EINVAL;
476 tmr = queue->timer;
477 snd_seq_timer_close(queue); 475 snd_seq_timer_close(queue);
478 queuefree(queue); 476 queuefree(queue);
479 return result; 477 return result;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 8edd998509f7..45b4a8d70e08 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4719,7 +4719,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
4719 cfg->dig_out_pins[0], cfg->dig_out_pins[1]); 4719 cfg->dig_out_pins[0], cfg->dig_out_pins[1]);
4720 snd_printd(" inputs:"); 4720 snd_printd(" inputs:");
4721 for (i = 0; i < cfg->num_inputs; i++) { 4721 for (i = 0; i < cfg->num_inputs; i++) {
4722 snd_printdd(" %s=0x%x", 4722 snd_printd(" %s=0x%x",
4723 hda_get_autocfg_input_label(codec, cfg, i), 4723 hda_get_autocfg_input_label(codec, cfg, i),
4724 cfg->inputs[i].pin); 4724 cfg->inputs[i].pin);
4725 } 4725 }
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 74b0560289c0..b05f7be9dc1b 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -312,23 +312,6 @@ out_fail:
312 return -EINVAL; 312 return -EINVAL;
313} 313}
314 314
315static int hdmi_eld_valid(struct hda_codec *codec, hda_nid_t nid)
316{
317 int eldv;
318 int present;
319
320 present = snd_hda_pin_sense(codec, nid);
321 eldv = (present & AC_PINSENSE_ELDV);
322 present = (present & AC_PINSENSE_PRESENCE);
323
324#ifdef CONFIG_SND_DEBUG_VERBOSE
325 printk(KERN_INFO "HDMI: sink_present = %d, eld_valid = %d\n",
326 !!present, !!eldv);
327#endif
328
329 return eldv && present;
330}
331
332int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid) 315int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid)
333{ 316{
334 return snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_HDMI_DIP_SIZE, 317 return snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_HDMI_DIP_SIZE,
@@ -343,7 +326,7 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
343 int size; 326 int size;
344 unsigned char *buf; 327 unsigned char *buf;
345 328
346 if (!hdmi_eld_valid(codec, nid)) 329 if (!eld->eld_valid)
347 return -ENOENT; 330 return -ENOENT;
348 331
349 size = snd_hdmi_get_eld_size(codec, nid); 332 size = snd_hdmi_get_eld_size(codec, nid);
@@ -477,6 +460,8 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
477 460
478 snd_iprintf(buffer, "monitor_present\t\t%d\n", e->monitor_present); 461 snd_iprintf(buffer, "monitor_present\t\t%d\n", e->monitor_present);
479 snd_iprintf(buffer, "eld_valid\t\t%d\n", e->eld_valid); 462 snd_iprintf(buffer, "eld_valid\t\t%d\n", e->eld_valid);
463 if (!e->eld_valid)
464 return;
480 snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name); 465 snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
481 snd_iprintf(buffer, "connection_type\t\t%s\n", 466 snd_iprintf(buffer, "connection_type\t\t%s\n",
482 eld_connection_type_names[e->conn_type]); 467 eld_connection_type_names[e->conn_type]);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 43a036716d25..486f6deb3eee 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -391,6 +391,7 @@ struct azx {
391 391
392 /* chip type specific */ 392 /* chip type specific */
393 int driver_type; 393 int driver_type;
394 unsigned int driver_caps;
394 int playback_streams; 395 int playback_streams;
395 int playback_index_offset; 396 int playback_index_offset;
396 int capture_streams; 397 int capture_streams;
@@ -464,6 +465,34 @@ enum {
464 AZX_NUM_DRIVERS, /* keep this as last entry */ 465 AZX_NUM_DRIVERS, /* keep this as last entry */
465}; 466};
466 467
468/* driver quirks (capabilities) */
469/* bits 0-7 are used for indicating driver type */
470#define AZX_DCAPS_NO_TCSEL (1 << 8) /* No Intel TCSEL bit */
471#define AZX_DCAPS_NO_MSI (1 << 9) /* No MSI support */
472#define AZX_DCAPS_ATI_SNOOP (1 << 10) /* ATI snoop enable */
473#define AZX_DCAPS_NVIDIA_SNOOP (1 << 11) /* Nvidia snoop enable */
474#define AZX_DCAPS_SCH_SNOOP (1 << 12) /* SCH/PCH snoop enable */
475#define AZX_DCAPS_RIRB_DELAY (1 << 13) /* Long delay in read loop */
476#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14) /* Put a delay before read */
477#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
478#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
479#define AZX_DCAPS_POSFIX_VIA (1 << 17) /* Use VIACOMBO as default */
480#define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
481#define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
482
483/* quirks for ATI SB / AMD Hudson */
484#define AZX_DCAPS_PRESET_ATI_SB \
485 (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
486 AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
487
488/* quirks for ATI/AMD HDMI */
489#define AZX_DCAPS_PRESET_ATI_HDMI \
490 (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
491
492/* quirks for Nvidia */
493#define AZX_DCAPS_PRESET_NVIDIA \
494 (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI)
495
467static char *driver_short_names[] __devinitdata = { 496static char *driver_short_names[] __devinitdata = {
468 [AZX_DRIVER_ICH] = "HDA Intel", 497 [AZX_DRIVER_ICH] = "HDA Intel",
469 [AZX_DRIVER_PCH] = "HDA Intel PCH", 498 [AZX_DRIVER_PCH] = "HDA Intel PCH",
@@ -566,7 +595,7 @@ static void azx_init_cmd_io(struct azx *chip)
566 /* reset the rirb hw write pointer */ 595 /* reset the rirb hw write pointer */
567 azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST); 596 azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
568 /* set N=1, get RIRB response interrupt for new entry */ 597 /* set N=1, get RIRB response interrupt for new entry */
569 if (chip->driver_type == AZX_DRIVER_CTX) 598 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
570 azx_writew(chip, RINTCNT, 0xc0); 599 azx_writew(chip, RINTCNT, 0xc0);
571 else 600 else
572 azx_writew(chip, RINTCNT, 1); 601 azx_writew(chip, RINTCNT, 1);
@@ -1056,19 +1085,24 @@ static void azx_init_pci(struct azx *chip)
1056 * codecs. 1085 * codecs.
1057 * The PCI register TCSEL is defined in the Intel manuals. 1086 * The PCI register TCSEL is defined in the Intel manuals.
1058 */ 1087 */
1059 if (chip->driver_type != AZX_DRIVER_ATI && 1088 if (!(chip->driver_caps & AZX_DCAPS_NO_TCSEL)) {
1060 chip->driver_type != AZX_DRIVER_ATIHDMI) 1089 snd_printdd(SFX "Clearing TCSEL\n");
1061 update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0); 1090 update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0);
1091 }
1062 1092
1063 switch (chip->driver_type) { 1093 /* For ATI SB450/600/700/800/900 and AMD Hudson azalia HD audio,
1064 case AZX_DRIVER_ATI: 1094 * we need to enable snoop.
1065 /* For ATI SB450 azalia HD audio, we need to enable snoop */ 1095 */
1096 if (chip->driver_caps & AZX_DCAPS_ATI_SNOOP) {
1097 snd_printdd(SFX "Enabling ATI snoop\n");
1066 update_pci_byte(chip->pci, 1098 update_pci_byte(chip->pci,
1067 ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, 1099 ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR,
1068 0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP); 1100 0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP);
1069 break; 1101 }
1070 case AZX_DRIVER_NVIDIA: 1102
1071 /* For NVIDIA HDA, enable snoop */ 1103 /* For NVIDIA HDA, enable snoop */
1104 if (chip->driver_caps & AZX_DCAPS_NVIDIA_SNOOP) {
1105 snd_printdd(SFX "Enabling Nvidia snoop\n");
1072 update_pci_byte(chip->pci, 1106 update_pci_byte(chip->pci,
1073 NVIDIA_HDA_TRANSREG_ADDR, 1107 NVIDIA_HDA_TRANSREG_ADDR,
1074 0x0f, NVIDIA_HDA_ENABLE_COHBITS); 1108 0x0f, NVIDIA_HDA_ENABLE_COHBITS);
@@ -1078,9 +1112,10 @@ static void azx_init_pci(struct azx *chip)
1078 update_pci_byte(chip->pci, 1112 update_pci_byte(chip->pci,
1079 NVIDIA_HDA_OSTRM_COH, 1113 NVIDIA_HDA_OSTRM_COH,
1080 0x01, NVIDIA_HDA_ENABLE_COHBIT); 1114 0x01, NVIDIA_HDA_ENABLE_COHBIT);
1081 break; 1115 }
1082 case AZX_DRIVER_SCH: 1116
1083 case AZX_DRIVER_PCH: 1117 /* Enable SCH/PCH snoop if needed */
1118 if (chip->driver_caps & AZX_DCAPS_SCH_SNOOP) {
1084 pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop); 1119 pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
1085 if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) { 1120 if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
1086 pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC, 1121 pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
@@ -1091,14 +1126,6 @@ static void azx_init_pci(struct azx *chip)
1091 (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) 1126 (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)
1092 ? "Failed" : "OK"); 1127 ? "Failed" : "OK");
1093 } 1128 }
1094 break;
1095 default:
1096 /* AMD Hudson needs the similar snoop, as it seems... */
1097 if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
1098 update_pci_byte(chip->pci,
1099 ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR,
1100 0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP);
1101 break;
1102 } 1129 }
1103} 1130}
1104 1131
@@ -1152,7 +1179,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
1152 status = azx_readb(chip, RIRBSTS); 1179 status = azx_readb(chip, RIRBSTS);
1153 if (status & RIRB_INT_MASK) { 1180 if (status & RIRB_INT_MASK) {
1154 if (status & RIRB_INT_RESPONSE) { 1181 if (status & RIRB_INT_RESPONSE) {
1155 if (chip->driver_type == AZX_DRIVER_CTX) 1182 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1156 udelay(80); 1183 udelay(80);
1157 azx_update_rirb(chip); 1184 azx_update_rirb(chip);
1158 } 1185 }
@@ -1421,8 +1448,10 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model)
1421 if (err < 0) 1448 if (err < 0)
1422 return err; 1449 return err;
1423 1450
1424 if (chip->driver_type == AZX_DRIVER_NVIDIA) 1451 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1452 snd_printd(SFX "Enable delay in RIRB handling\n");
1425 chip->bus->needs_damn_long_delay = 1; 1453 chip->bus->needs_damn_long_delay = 1;
1454 }
1426 1455
1427 codecs = 0; 1456 codecs = 0;
1428 max_slots = azx_max_codecs[chip->driver_type]; 1457 max_slots = azx_max_codecs[chip->driver_type];
@@ -1457,9 +1486,8 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model)
1457 * sequence like the pin-detection. It seems that forcing the synced 1486 * sequence like the pin-detection. It seems that forcing the synced
1458 * access works around the stall. Grrr... 1487 * access works around the stall. Grrr...
1459 */ 1488 */
1460 if (chip->pci->vendor == PCI_VENDOR_ID_AMD || 1489 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1461 chip->pci->vendor == PCI_VENDOR_ID_ATI) { 1490 snd_printd(SFX "Enable sync_write for stable communication\n");
1462 snd_printk(KERN_INFO SFX "Enable sync_write for AMD chipset\n");
1463 chip->bus->sync_write = 1; 1491 chip->bus->sync_write = 1;
1464 chip->bus->allow_bus_reset = 1; 1492 chip->bus->allow_bus_reset = 1;
1465 } 1493 }
@@ -1720,7 +1748,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
1720 1748
1721 stream_tag = azx_dev->stream_tag; 1749 stream_tag = azx_dev->stream_tag;
1722 /* CA-IBG chips need the playback stream starting from 1 */ 1750 /* CA-IBG chips need the playback stream starting from 1 */
1723 if (chip->driver_type == AZX_DRIVER_CTX && 1751 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
1724 stream_tag > chip->capture_streams) 1752 stream_tag > chip->capture_streams)
1725 stream_tag -= chip->capture_streams; 1753 stream_tag -= chip->capture_streams;
1726 return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, 1754 return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
@@ -2365,20 +2393,14 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
2365 } 2393 }
2366 2394
2367 /* Check VIA/ATI HD Audio Controller exist */ 2395 /* Check VIA/ATI HD Audio Controller exist */
2368 switch (chip->driver_type) { 2396 if (chip->driver_caps & AZX_DCAPS_POSFIX_VIA) {
2369 case AZX_DRIVER_VIA: 2397 snd_printd(SFX "Using VIACOMBO position fix\n");
2370 /* Use link position directly, avoid any transfer problem. */
2371 return POS_FIX_VIACOMBO; 2398 return POS_FIX_VIACOMBO;
2372 case AZX_DRIVER_ATI: 2399 }
2373 /* ATI chipsets don't work well with position-buffer */ 2400 if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
2401 snd_printd(SFX "Using LPIB position fix\n");
2374 return POS_FIX_LPIB; 2402 return POS_FIX_LPIB;
2375 case AZX_DRIVER_GENERIC:
2376 /* AMD chipsets also don't work with position-buffer */
2377 if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
2378 return POS_FIX_LPIB;
2379 break;
2380 } 2403 }
2381
2382 return POS_FIX_AUTO; 2404 return POS_FIX_AUTO;
2383} 2405}
2384 2406
@@ -2460,8 +2482,8 @@ static void __devinit check_msi(struct azx *chip)
2460 } 2482 }
2461 2483
2462 /* NVidia chipsets seem to cause troubles with MSI */ 2484 /* NVidia chipsets seem to cause troubles with MSI */
2463 if (chip->driver_type == AZX_DRIVER_NVIDIA) { 2485 if (chip->driver_caps & AZX_DCAPS_NO_MSI) {
2464 printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n"); 2486 printk(KERN_INFO "hda_intel: Disabling MSI\n");
2465 chip->msi = 0; 2487 chip->msi = 0;
2466 } 2488 }
2467} 2489}
@@ -2471,7 +2493,7 @@ static void __devinit check_msi(struct azx *chip)
2471 * constructor 2493 * constructor
2472 */ 2494 */
2473static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci, 2495static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2474 int dev, int driver_type, 2496 int dev, unsigned int driver_caps,
2475 struct azx **rchip) 2497 struct azx **rchip)
2476{ 2498{
2477 struct azx *chip; 2499 struct azx *chip;
@@ -2499,7 +2521,8 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2499 chip->card = card; 2521 chip->card = card;
2500 chip->pci = pci; 2522 chip->pci = pci;
2501 chip->irq = -1; 2523 chip->irq = -1;
2502 chip->driver_type = driver_type; 2524 chip->driver_caps = driver_caps;
2525 chip->driver_type = driver_caps & 0xff;
2503 check_msi(chip); 2526 check_msi(chip);
2504 chip->dev_index = dev; 2527 chip->dev_index = dev;
2505 INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work); 2528 INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
@@ -2563,8 +2586,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2563 snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap); 2586 snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap);
2564 2587
2565 /* disable SB600 64bit support for safety */ 2588 /* disable SB600 64bit support for safety */
2566 if ((chip->driver_type == AZX_DRIVER_ATI) || 2589 if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
2567 (chip->driver_type == AZX_DRIVER_ATIHDMI)) {
2568 struct pci_dev *p_smbus; 2590 struct pci_dev *p_smbus;
2569 p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, 2591 p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
2570 PCI_DEVICE_ID_ATI_SBX00_SMBUS, 2592 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
@@ -2574,19 +2596,13 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2574 gcap &= ~ICH6_GCAP_64OK; 2596 gcap &= ~ICH6_GCAP_64OK;
2575 pci_dev_put(p_smbus); 2597 pci_dev_put(p_smbus);
2576 } 2598 }
2577 } else {
2578 /* FIXME: not sure whether this is really needed, but
2579 * Hudson isn't stable enough for allowing everything...
2580 * let's check later again.
2581 */
2582 if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
2583 gcap &= ~ICH6_GCAP_64OK;
2584 } 2599 }
2585 2600
2586 /* disable 64bit DMA address for Teradici */ 2601 /* disable 64bit DMA address on some devices */
2587 /* it does not work with device 6549:1200 subsys e4a2:040b */ 2602 if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
2588 if (chip->driver_type == AZX_DRIVER_TERA) 2603 snd_printd(SFX "Disabling 64bit DMA\n");
2589 gcap &= ~ICH6_GCAP_64OK; 2604 gcap &= ~ICH6_GCAP_64OK;
2605 }
2590 2606
2591 /* allow 64bit DMA address if supported by H/W */ 2607 /* allow 64bit DMA address if supported by H/W */
2592 if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64))) 2608 if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
@@ -2788,38 +2804,62 @@ static void __devexit azx_remove(struct pci_dev *pci)
2788/* PCI IDs */ 2804/* PCI IDs */
2789static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { 2805static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
2790 /* CPT */ 2806 /* CPT */
2791 { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH }, 2807 { PCI_DEVICE(0x8086, 0x1c20),
2808 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
2792 /* PBG */ 2809 /* PBG */
2793 { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH }, 2810 { PCI_DEVICE(0x8086, 0x1d20),
2811 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
2794 /* Panther Point */ 2812 /* Panther Point */
2795 { PCI_DEVICE(0x8086, 0x1e20), .driver_data = AZX_DRIVER_PCH }, 2813 { PCI_DEVICE(0x8086, 0x1e20),
2814 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
2796 /* SCH */ 2815 /* SCH */
2797 { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH }, 2816 { PCI_DEVICE(0x8086, 0x811b),
2817 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP },
2798 /* Generic Intel */ 2818 /* Generic Intel */
2799 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID), 2819 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
2800 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, 2820 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
2801 .class_mask = 0xffffff, 2821 .class_mask = 0xffffff,
2802 .driver_data = AZX_DRIVER_ICH }, 2822 .driver_data = AZX_DRIVER_ICH },
2803 /* ATI SB 450/600 */ 2823 /* ATI SB 450/600/700/800/900 */
2804 { PCI_DEVICE(0x1002, 0x437b), .driver_data = AZX_DRIVER_ATI }, 2824 { PCI_DEVICE(0x1002, 0x437b),
2805 { PCI_DEVICE(0x1002, 0x4383), .driver_data = AZX_DRIVER_ATI }, 2825 .driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
2826 { PCI_DEVICE(0x1002, 0x4383),
2827 .driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
2828 /* AMD Hudson */
2829 { PCI_DEVICE(0x1022, 0x780d),
2830 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
2806 /* ATI HDMI */ 2831 /* ATI HDMI */
2807 { PCI_DEVICE(0x1002, 0x793b), .driver_data = AZX_DRIVER_ATIHDMI }, 2832 { PCI_DEVICE(0x1002, 0x793b),
2808 { PCI_DEVICE(0x1002, 0x7919), .driver_data = AZX_DRIVER_ATIHDMI }, 2833 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2809 { PCI_DEVICE(0x1002, 0x960f), .driver_data = AZX_DRIVER_ATIHDMI }, 2834 { PCI_DEVICE(0x1002, 0x7919),
2810 { PCI_DEVICE(0x1002, 0x970f), .driver_data = AZX_DRIVER_ATIHDMI }, 2835 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2811 { PCI_DEVICE(0x1002, 0xaa00), .driver_data = AZX_DRIVER_ATIHDMI }, 2836 { PCI_DEVICE(0x1002, 0x960f),
2812 { PCI_DEVICE(0x1002, 0xaa08), .driver_data = AZX_DRIVER_ATIHDMI }, 2837 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2813 { PCI_DEVICE(0x1002, 0xaa10), .driver_data = AZX_DRIVER_ATIHDMI }, 2838 { PCI_DEVICE(0x1002, 0x970f),
2814 { PCI_DEVICE(0x1002, 0xaa18), .driver_data = AZX_DRIVER_ATIHDMI }, 2839 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2815 { PCI_DEVICE(0x1002, 0xaa20), .driver_data = AZX_DRIVER_ATIHDMI }, 2840 { PCI_DEVICE(0x1002, 0xaa00),
2816 { PCI_DEVICE(0x1002, 0xaa28), .driver_data = AZX_DRIVER_ATIHDMI }, 2841 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2817 { PCI_DEVICE(0x1002, 0xaa30), .driver_data = AZX_DRIVER_ATIHDMI }, 2842 { PCI_DEVICE(0x1002, 0xaa08),
2818 { PCI_DEVICE(0x1002, 0xaa38), .driver_data = AZX_DRIVER_ATIHDMI }, 2843 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2819 { PCI_DEVICE(0x1002, 0xaa40), .driver_data = AZX_DRIVER_ATIHDMI }, 2844 { PCI_DEVICE(0x1002, 0xaa10),
2820 { PCI_DEVICE(0x1002, 0xaa48), .driver_data = AZX_DRIVER_ATIHDMI }, 2845 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2846 { PCI_DEVICE(0x1002, 0xaa18),
2847 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2848 { PCI_DEVICE(0x1002, 0xaa20),
2849 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2850 { PCI_DEVICE(0x1002, 0xaa28),
2851 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2852 { PCI_DEVICE(0x1002, 0xaa30),
2853 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2854 { PCI_DEVICE(0x1002, 0xaa38),
2855 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2856 { PCI_DEVICE(0x1002, 0xaa40),
2857 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2858 { PCI_DEVICE(0x1002, 0xaa48),
2859 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2821 /* VIA VT8251/VT8237A */ 2860 /* VIA VT8251/VT8237A */
2822 { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA }, 2861 { PCI_DEVICE(0x1106, 0x3288),
2862 .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
2823 /* SIS966 */ 2863 /* SIS966 */
2824 { PCI_DEVICE(0x1039, 0x7502), .driver_data = AZX_DRIVER_SIS }, 2864 { PCI_DEVICE(0x1039, 0x7502), .driver_data = AZX_DRIVER_SIS },
2825 /* ULI M5461 */ 2865 /* ULI M5461 */
@@ -2828,9 +2868,10 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
2828 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), 2868 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
2829 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, 2869 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
2830 .class_mask = 0xffffff, 2870 .class_mask = 0xffffff,
2831 .driver_data = AZX_DRIVER_NVIDIA }, 2871 .driver_data = AZX_DRIVER_NVIDIA | AZX_DCAPS_PRESET_NVIDIA },
2832 /* Teradici */ 2872 /* Teradici */
2833 { PCI_DEVICE(0x6549, 0x1200), .driver_data = AZX_DRIVER_TERA }, 2873 { PCI_DEVICE(0x6549, 0x1200),
2874 .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
2834 /* Creative X-Fi (CA0110-IBG) */ 2875 /* Creative X-Fi (CA0110-IBG) */
2835#if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) 2876#if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE)
2836 /* the following entry conflicts with snd-ctxfi driver, 2877 /* the following entry conflicts with snd-ctxfi driver,
@@ -2840,10 +2881,13 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
2840 { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID), 2881 { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID),
2841 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, 2882 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
2842 .class_mask = 0xffffff, 2883 .class_mask = 0xffffff,
2843 .driver_data = AZX_DRIVER_CTX }, 2884 .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
2885 AZX_DCAPS_RIRB_PRE_DELAY },
2844#else 2886#else
2845 /* this entry seems still valid -- i.e. without emu20kx chip */ 2887 /* this entry seems still valid -- i.e. without emu20kx chip */
2846 { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_CTX }, 2888 { PCI_DEVICE(0x1102, 0x0009),
2889 .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
2890 AZX_DCAPS_RIRB_PRE_DELAY },
2847#endif 2891#endif
2848 /* Vortex86MX */ 2892 /* Vortex86MX */
2849 { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, 2893 { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
@@ -2853,11 +2897,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
2853 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID), 2897 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
2854 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, 2898 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
2855 .class_mask = 0xffffff, 2899 .class_mask = 0xffffff,
2856 .driver_data = AZX_DRIVER_GENERIC }, 2900 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
2857 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_ANY_ID), 2901 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_ANY_ID),
2858 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, 2902 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
2859 .class_mask = 0xffffff, 2903 .class_mask = 0xffffff,
2860 .driver_data = AZX_DRIVER_GENERIC }, 2904 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
2861 { 0, } 2905 { 0, }
2862}; 2906};
2863MODULE_DEVICE_TABLE(pci, azx_ids); 2907MODULE_DEVICE_TABLE(pci, azx_ids);
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index f1b3875c57df..696ac2590307 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3159,6 +3159,7 @@ static const struct snd_pci_quirk ad1988_cfg_tbl[] = {
3159 SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG), 3159 SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG),
3160 SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG), 3160 SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG),
3161 SND_PCI_QUIRK(0x1043, 0x8277, "Asus P5K-E/WIFI-AP", AD1988_6STACK_DIG), 3161 SND_PCI_QUIRK(0x1043, 0x8277, "Asus P5K-E/WIFI-AP", AD1988_6STACK_DIG),
3162 SND_PCI_QUIRK(0x1043, 0x82c0, "Asus M3N-HT Deluxe", AD1988_6STACK_DIG),
3162 SND_PCI_QUIRK(0x1043, 0x8311, "Asus P5Q-Premium/Pro", AD1988_6STACK_DIG), 3163 SND_PCI_QUIRK(0x1043, 0x8311, "Asus P5Q-Premium/Pro", AD1988_6STACK_DIG),
3163 {} 3164 {}
3164}; 3165};
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4f37477d3c71..3e6b9a8539c2 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3098,7 +3098,9 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3098 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), 3098 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
3099 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), 3099 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
3100 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), 3100 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
3101 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
3101 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), 3102 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
3103 SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
3102 SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */ 3104 SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
3103 {} 3105 {}
3104}; 3106};
@@ -3433,7 +3435,9 @@ static void cx_auto_parse_output(struct hda_codec *codec)
3433 break; 3435 break;
3434 } 3436 }
3435 } 3437 }
3436 if (spec->auto_mute && cfg->line_out_pins[0] && 3438 if (spec->auto_mute &&
3439 cfg->line_out_pins[0] &&
3440 cfg->line_out_type != AUTO_PIN_SPEAKER_OUT &&
3437 cfg->line_out_pins[0] != cfg->hp_pins[0] && 3441 cfg->line_out_pins[0] != cfg->hp_pins[0] &&
3438 cfg->line_out_pins[0] != cfg->speaker_pins[0]) { 3442 cfg->line_out_pins[0] != cfg->speaker_pins[0]) {
3439 for (i = 0; i < cfg->line_outs; i++) { 3443 for (i = 0; i < cfg->line_outs; i++) {
@@ -3481,25 +3485,32 @@ static void cx_auto_update_speakers(struct hda_codec *codec)
3481{ 3485{
3482 struct conexant_spec *spec = codec->spec; 3486 struct conexant_spec *spec = codec->spec;
3483 struct auto_pin_cfg *cfg = &spec->autocfg; 3487 struct auto_pin_cfg *cfg = &spec->autocfg;
3484 int on; 3488 int on = 1;
3485 3489
3486 if (!spec->auto_mute) 3490 /* turn on HP EAPD when HP jacks are present */
3487 on = 0; 3491 if (spec->auto_mute)
3488 else 3492 on = spec->hp_present;
3489 on = spec->hp_present | spec->line_present;
3490 cx_auto_turn_eapd(codec, cfg->hp_outs, cfg->hp_pins, on); 3493 cx_auto_turn_eapd(codec, cfg->hp_outs, cfg->hp_pins, on);
3491 do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, !on); 3494 /* mute speakers in auto-mode if HP or LO jacks are plugged */
3495 if (spec->auto_mute)
3496 on = !(spec->hp_present ||
3497 (spec->detect_line && spec->line_present));
3498 do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, on);
3492 3499
3493 /* toggle line-out mutes if needed, too */ 3500 /* toggle line-out mutes if needed, too */
3494 /* if LO is a copy of either HP or Speaker, don't need to handle it */ 3501 /* if LO is a copy of either HP or Speaker, don't need to handle it */
3495 if (cfg->line_out_pins[0] == cfg->hp_pins[0] || 3502 if (cfg->line_out_pins[0] == cfg->hp_pins[0] ||
3496 cfg->line_out_pins[0] == cfg->speaker_pins[0]) 3503 cfg->line_out_pins[0] == cfg->speaker_pins[0])
3497 return; 3504 return;
3498 if (!spec->automute_lines || !spec->auto_mute) 3505 if (spec->auto_mute) {
3499 on = 0; 3506 /* mute LO in auto-mode when HP jack is present */
3500 else 3507 if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT ||
3501 on = spec->hp_present; 3508 spec->automute_lines)
3502 do_automute(codec, cfg->line_outs, cfg->line_out_pins, !on); 3509 on = !spec->hp_present;
3510 else
3511 on = 1;
3512 }
3513 do_automute(codec, cfg->line_outs, cfg->line_out_pins, on);
3503} 3514}
3504 3515
3505static void cx_auto_hp_automute(struct hda_codec *codec) 3516static void cx_auto_hp_automute(struct hda_codec *codec)
@@ -3696,13 +3707,14 @@ static int cx_auto_mux_enum_update(struct hda_codec *codec,
3696{ 3707{
3697 struct conexant_spec *spec = codec->spec; 3708 struct conexant_spec *spec = codec->spec;
3698 hda_nid_t adc; 3709 hda_nid_t adc;
3710 int changed = 1;
3699 3711
3700 if (!imux->num_items) 3712 if (!imux->num_items)
3701 return 0; 3713 return 0;
3702 if (idx >= imux->num_items) 3714 if (idx >= imux->num_items)
3703 idx = imux->num_items - 1; 3715 idx = imux->num_items - 1;
3704 if (spec->cur_mux[0] == idx) 3716 if (spec->cur_mux[0] == idx)
3705 return 0; 3717 changed = 0;
3706 adc = spec->imux_info[idx].adc; 3718 adc = spec->imux_info[idx].adc;
3707 select_input_connection(codec, spec->imux_info[idx].adc, 3719 select_input_connection(codec, spec->imux_info[idx].adc,
3708 spec->imux_info[idx].pin); 3720 spec->imux_info[idx].pin);
@@ -3715,7 +3727,7 @@ static int cx_auto_mux_enum_update(struct hda_codec *codec,
3715 spec->cur_adc_format); 3727 spec->cur_adc_format);
3716 } 3728 }
3717 spec->cur_mux[0] = idx; 3729 spec->cur_mux[0] = idx;
3718 return 1; 3730 return changed;
3719} 3731}
3720 3732
3721static int cx_auto_mux_enum_put(struct snd_kcontrol *kcontrol, 3733static int cx_auto_mux_enum_put(struct snd_kcontrol *kcontrol,
@@ -3789,7 +3801,7 @@ static void cx_auto_check_auto_mic(struct hda_codec *codec)
3789 int pset[INPUT_PIN_ATTR_NORMAL + 1]; 3801 int pset[INPUT_PIN_ATTR_NORMAL + 1];
3790 int i; 3802 int i;
3791 3803
3792 for (i = 0; i < INPUT_PIN_ATTR_NORMAL; i++) 3804 for (i = 0; i < ARRAY_SIZE(pset); i++)
3793 pset[i] = -1; 3805 pset[i] = -1;
3794 for (i = 0; i < spec->private_imux.num_items; i++) { 3806 for (i = 0; i < spec->private_imux.num_items; i++) {
3795 hda_nid_t pin = spec->imux_info[i].pin; 3807 hda_nid_t pin = spec->imux_info[i].pin;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 322901873222..bd0ae697f9c4 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -48,8 +48,8 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
48 * 48 *
49 * The HDA correspondence of pipes/ports are converter/pin nodes. 49 * The HDA correspondence of pipes/ports are converter/pin nodes.
50 */ 50 */
51#define MAX_HDMI_CVTS 3 51#define MAX_HDMI_CVTS 4
52#define MAX_HDMI_PINS 3 52#define MAX_HDMI_PINS 4
53 53
54struct hdmi_spec { 54struct hdmi_spec {
55 int num_cvts; 55 int num_cvts;
@@ -78,10 +78,6 @@ struct hdmi_spec {
78 */ 78 */
79 struct hda_multi_out multiout; 79 struct hda_multi_out multiout;
80 const struct hda_pcm_stream *pcm_playback; 80 const struct hda_pcm_stream *pcm_playback;
81
82 /* misc flags */
83 /* PD bit indicates only the update, not the current state */
84 unsigned int old_pin_detect:1;
85}; 81};
86 82
87 83
@@ -300,13 +296,6 @@ static int hda_node_index(hda_nid_t *nids, hda_nid_t nid)
300 return -EINVAL; 296 return -EINVAL;
301} 297}
302 298
303static void hdmi_get_show_eld(struct hda_codec *codec, hda_nid_t pin_nid,
304 struct hdmi_eld *eld)
305{
306 if (!snd_hdmi_get_eld(eld, codec, pin_nid))
307 snd_hdmi_show_eld(eld);
308}
309
310#ifdef BE_PARANOID 299#ifdef BE_PARANOID
311static void hdmi_get_dip_index(struct hda_codec *codec, hda_nid_t pin_nid, 300static void hdmi_get_dip_index(struct hda_codec *codec, hda_nid_t pin_nid,
312 int *packet_index, int *byte_index) 301 int *packet_index, int *byte_index)
@@ -694,35 +683,20 @@ static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
694static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) 683static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
695{ 684{
696 struct hdmi_spec *spec = codec->spec; 685 struct hdmi_spec *spec = codec->spec;
697 int tag = res >> AC_UNSOL_RES_TAG_SHIFT; 686 int pin_nid = res >> AC_UNSOL_RES_TAG_SHIFT;
698 int pind = !!(res & AC_UNSOL_RES_PD); 687 int pd = !!(res & AC_UNSOL_RES_PD);
699 int eldv = !!(res & AC_UNSOL_RES_ELDV); 688 int eldv = !!(res & AC_UNSOL_RES_ELDV);
700 int index; 689 int index;
701 690
702 printk(KERN_INFO 691 printk(KERN_INFO
703 "HDMI hot plug event: Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 692 "HDMI hot plug event: Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
704 tag, pind, eldv); 693 pin_nid, pd, eldv);
705 694
706 index = hda_node_index(spec->pin, tag); 695 index = hda_node_index(spec->pin, pin_nid);
707 if (index < 0) 696 if (index < 0)
708 return; 697 return;
709 698
710 if (spec->old_pin_detect) { 699 hdmi_present_sense(codec, pin_nid, &spec->sink_eld[index]);
711 if (pind)
712 hdmi_present_sense(codec, tag, &spec->sink_eld[index]);
713 pind = spec->sink_eld[index].monitor_present;
714 }
715
716 spec->sink_eld[index].monitor_present = pind;
717 spec->sink_eld[index].eld_valid = eldv;
718
719 if (pind && eldv) {
720 hdmi_get_show_eld(codec, spec->pin[index],
721 &spec->sink_eld[index]);
722 /* TODO: do real things about ELD */
723 }
724
725 snd_hda_input_jack_report(codec, tag);
726} 700}
727 701
728static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) 702static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -903,13 +877,33 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, hda_nid_t pin_nid)
903static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid, 877static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
904 struct hdmi_eld *eld) 878 struct hdmi_eld *eld)
905{ 879{
880 /*
881 * Always execute a GetPinSense verb here, even when called from
882 * hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited
883 * response's PD bit is not the real PD value, but indicates that
884 * the real PD value changed. An older version of the HD-audio
885 * specification worked this way. Hence, we just ignore the data in
886 * the unsolicited response to avoid custom WARs.
887 */
906 int present = snd_hda_pin_sense(codec, pin_nid); 888 int present = snd_hda_pin_sense(codec, pin_nid);
907 889
890 memset(eld, 0, sizeof(*eld));
891
908 eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE); 892 eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
909 eld->eld_valid = !!(present & AC_PINSENSE_ELDV); 893 if (eld->monitor_present)
894 eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
895 else
896 eld->eld_valid = 0;
910 897
911 if (present & AC_PINSENSE_ELDV) 898 printk(KERN_INFO
912 hdmi_get_show_eld(codec, pin_nid, eld); 899 "HDMI status: Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
900 pin_nid, eld->monitor_present, eld->eld_valid);
901
902 if (eld->eld_valid)
903 if (!snd_hdmi_get_eld(eld, codec, pin_nid))
904 snd_hdmi_show_eld(eld);
905
906 snd_hda_input_jack_report(codec, pin_nid);
913} 907}
914 908
915static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) 909static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
@@ -927,7 +921,6 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
927 SND_JACK_VIDEOOUT, NULL); 921 SND_JACK_VIDEOOUT, NULL);
928 if (err < 0) 922 if (err < 0)
929 return err; 923 return err;
930 snd_hda_input_jack_report(codec, pin_nid);
931 924
932 hdmi_present_sense(codec, pin_nid, &spec->sink_eld[spec->num_pins]); 925 hdmi_present_sense(codec, pin_nid, &spec->sink_eld[spec->num_pins]);
933 926
@@ -1034,6 +1027,7 @@ static char *generic_hdmi_pcm_names[MAX_HDMI_CVTS] = {
1034 "HDMI 0", 1027 "HDMI 0",
1035 "HDMI 1", 1028 "HDMI 1",
1036 "HDMI 2", 1029 "HDMI 2",
1030 "HDMI 3",
1037}; 1031};
1038 1032
1039/* 1033/*
@@ -1490,18 +1484,6 @@ static const struct hda_codec_ops nvhdmi_patch_ops_2ch = {
1490 .free = generic_hdmi_free, 1484 .free = generic_hdmi_free,
1491}; 1485};
1492 1486
1493static int patch_nvhdmi_8ch_89(struct hda_codec *codec)
1494{
1495 struct hdmi_spec *spec;
1496 int err = patch_generic_hdmi(codec);
1497
1498 if (err < 0)
1499 return err;
1500 spec = codec->spec;
1501 spec->old_pin_detect = 1;
1502 return 0;
1503}
1504
1505static int patch_nvhdmi_2ch(struct hda_codec *codec) 1487static int patch_nvhdmi_2ch(struct hda_codec *codec)
1506{ 1488{
1507 struct hdmi_spec *spec; 1489 struct hdmi_spec *spec;
@@ -1515,7 +1497,6 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
1515 spec->multiout.num_dacs = 0; /* no analog */ 1497 spec->multiout.num_dacs = 0; /* no analog */
1516 spec->multiout.max_channels = 2; 1498 spec->multiout.max_channels = 2;
1517 spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x; 1499 spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x;
1518 spec->old_pin_detect = 1;
1519 spec->num_cvts = 1; 1500 spec->num_cvts = 1;
1520 spec->cvt[0] = nvhdmi_master_con_nid_7x; 1501 spec->cvt[0] = nvhdmi_master_con_nid_7x;
1521 spec->pcm_playback = &nvhdmi_pcm_playback_2ch; 1502 spec->pcm_playback = &nvhdmi_pcm_playback_2ch;
@@ -1658,28 +1639,28 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
1658{ .id = 0x10de0005, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x }, 1639{ .id = 0x10de0005, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
1659{ .id = 0x10de0006, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x }, 1640{ .id = 0x10de0006, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
1660{ .id = 0x10de0007, .name = "MCP79/7A HDMI", .patch = patch_nvhdmi_8ch_7x }, 1641{ .id = 0x10de0007, .name = "MCP79/7A HDMI", .patch = patch_nvhdmi_8ch_7x },
1661{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1642{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_generic_hdmi },
1662{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1643{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_generic_hdmi },
1663{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_nvhdmi_8ch_89 }, 1644{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_generic_hdmi },
1664{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1645{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_generic_hdmi },
1665{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1646{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_generic_hdmi },
1666{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1647{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_generic_hdmi },
1667{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1648{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_generic_hdmi },
1668{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1649{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_generic_hdmi },
1669{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1650{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_generic_hdmi },
1670{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1651{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_generic_hdmi },
1671{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1652{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_generic_hdmi },
1672/* 17 is known to be absent */ 1653/* 17 is known to be absent */
1673{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1654{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_generic_hdmi },
1674{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1655{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_generic_hdmi },
1675{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1656{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_generic_hdmi },
1676{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1657{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_generic_hdmi },
1677{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1658{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_generic_hdmi },
1678{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1659{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_generic_hdmi },
1679{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1660{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_generic_hdmi },
1680{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1661{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi },
1681{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1662{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi },
1682{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1663{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi },
1683{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 1664{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
1684{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 1665{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
1685{ .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi }, 1666{ .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi },
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index 28afbbf69ce0..95572d290c27 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -146,7 +146,7 @@ static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd)
146 "at91sam9g20ek_wm8731 " 146 "at91sam9g20ek_wm8731 "
147 ": at91sam9g20ek_wm8731_init() called\n"); 147 ": at91sam9g20ek_wm8731_init() called\n");
148 148
149 ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL, 149 ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_MCLK,
150 MCLK_RATE, SND_SOC_CLOCK_IN); 150 MCLK_RATE, SND_SOC_CLOCK_IN);
151 if (ret < 0) { 151 if (ret < 0) {
152 printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret); 152 printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret);
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c
index 14d0716bf009..bcc208967917 100644
--- a/sound/soc/codecs/wm1250-ev1.c
+++ b/sound/soc/codecs/wm1250-ev1.c
@@ -22,7 +22,7 @@ SND_SOC_DAPM_ADC("ADC", "wm1250-ev1 Capture", SND_SOC_NOPM, 0, 0),
22SND_SOC_DAPM_DAC("DAC", "wm1250-ev1 Playback", SND_SOC_NOPM, 0, 0), 22SND_SOC_DAPM_DAC("DAC", "wm1250-ev1 Playback", SND_SOC_NOPM, 0, 0),
23 23
24SND_SOC_DAPM_INPUT("WM1250 Input"), 24SND_SOC_DAPM_INPUT("WM1250 Input"),
25SND_SOC_DAPM_INPUT("WM1250 Output"), 25SND_SOC_DAPM_OUTPUT("WM1250 Output"),
26}; 26};
27 27
28static const struct snd_soc_dapm_route wm1250_ev1_dapm_routes[] = { 28static const struct snd_soc_dapm_route wm1250_ev1_dapm_routes[] = {
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 6dec7cee2cb4..2dc964b55e4f 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -198,7 +198,7 @@ static int wm8731_check_osc(struct snd_soc_dapm_widget *source,
198{ 198{
199 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(source->codec); 199 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(source->codec);
200 200
201 return wm8731->sysclk_type == WM8731_SYSCLK_MCLK; 201 return wm8731->sysclk_type == WM8731_SYSCLK_XTAL;
202} 202}
203 203
204static const struct snd_soc_dapm_route wm8731_intercon[] = { 204static const struct snd_soc_dapm_route wm8731_intercon[] = {
diff --git a/sound/soc/codecs/wm8915.c b/sound/soc/codecs/wm8915.c
index ccc9bd832794..a0b1a7278284 100644
--- a/sound/soc/codecs/wm8915.c
+++ b/sound/soc/codecs/wm8915.c
@@ -19,7 +19,6 @@
19#include <linux/gcd.h> 19#include <linux/gcd.h>
20#include <linux/gpio.h> 20#include <linux/gpio.h>
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/delay.h>
23#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/workqueue.h> 24#include <linux/workqueue.h>
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
index 2afabaf59491..1a591f1ebfbd 100644
--- a/sound/soc/pxa/raumfeld.c
+++ b/sound/soc/pxa/raumfeld.c
@@ -151,13 +151,13 @@ static struct snd_soc_ops raumfeld_cs4270_ops = {
151 .hw_params = raumfeld_cs4270_hw_params, 151 .hw_params = raumfeld_cs4270_hw_params,
152}; 152};
153 153
154static int raumfeld_line_suspend(struct snd_soc_card *card) 154static int raumfeld_analog_suspend(struct snd_soc_card *card)
155{ 155{
156 raumfeld_enable_audio(false); 156 raumfeld_enable_audio(false);
157 return 0; 157 return 0;
158} 158}
159 159
160static int raumfeld_line_resume(struct snd_soc_card *card) 160static int raumfeld_analog_resume(struct snd_soc_card *card)
161{ 161{
162 raumfeld_enable_audio(true); 162 raumfeld_enable_audio(true);
163 return 0; 163 return 0;
@@ -225,32 +225,53 @@ static struct snd_soc_ops raumfeld_ak4104_ops = {
 	.hw_params = raumfeld_ak4104_hw_params,
 };

-static struct snd_soc_dai_link raumfeld_dai[] = {
+#define DAI_LINK_CS4270		\
+{							\
+	.name		= "CS4270",		\
+	.stream_name	= "CS4270",		\
+	.cpu_dai_name	= "pxa-ssp-dai.0",	\
+	.platform_name	= "pxa-pcm-audio",	\
+	.codec_dai_name	= "cs4270-hifi",	\
+	.codec_name	= "cs4270-codec.0-0048",	\
+	.ops		= &raumfeld_cs4270_ops,	\
+}
+
+#define DAI_LINK_AK4104		\
+{							\
+	.name		= "ak4104",		\
+	.stream_name	= "Playback",		\
+	.cpu_dai_name	= "pxa-ssp-dai.1",	\
+	.codec_dai_name	= "ak4104-hifi",	\
+	.platform_name	= "pxa-pcm-audio",	\
+	.ops		= &raumfeld_ak4104_ops,	\
+	.codec_name	= "spi0.0",		\
+}
+
+static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] =
 {
-	.name		= "ak4104",
-	.stream_name	= "Playback",
-	.cpu_dai_name	= "pxa-ssp-dai.1",
-	.codec_dai_name	= "ak4104-hifi",
-	.platform_name	= "pxa-pcm-audio",
-	.ops		= &raumfeld_ak4104_ops,
-	.codec_name	= "ak4104-codec.0",
-},
+	DAI_LINK_CS4270,
+	DAI_LINK_AK4104,
+};
+
+static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] =
 {
-	.name		= "CS4270",
-	.stream_name	= "CS4270",
-	.cpu_dai_name	= "pxa-ssp-dai.0",
-	.platform_name	= "pxa-pcm-audio",
-	.codec_dai_name	= "cs4270-hifi",
-	.codec_name	= "cs4270-codec.0-0048",
-	.ops		= &raumfeld_cs4270_ops,
-},};
-
-static struct snd_soc_card snd_soc_raumfeld = {
-	.name		= "Raumfeld",
-	.dai_link	= raumfeld_dai,
-	.suspend_post	= raumfeld_line_suspend,
-	.resume_pre	= raumfeld_line_resume,
-	.num_links	= ARRAY_SIZE(raumfeld_dai),
+	DAI_LINK_CS4270,
+};
+
+static struct snd_soc_card snd_soc_raumfeld_connector = {
+	.name		= "Raumfeld Connector",
+	.dai_link	= snd_soc_raumfeld_connector_dai,
+	.num_links	= ARRAY_SIZE(snd_soc_raumfeld_connector_dai),
+	.suspend_post	= raumfeld_analog_suspend,
+	.resume_pre	= raumfeld_analog_resume,
+};
+
+static struct snd_soc_card snd_soc_raumfeld_speaker = {
+	.name		= "Raumfeld Speaker",
+	.dai_link	= snd_soc_raumfeld_speaker_dai,
+	.num_links	= ARRAY_SIZE(snd_soc_raumfeld_speaker_dai),
+	.suspend_post	= raumfeld_analog_suspend,
+	.resume_pre	= raumfeld_analog_resume,
 };

 static struct platform_device *raumfeld_audio_device;
@@ -271,22 +292,25 @@ static int __init raumfeld_audio_init(void)

 	set_max9485_clk(MAX9485_MCLK_FREQ_122880);

-	/* Register LINE and SPDIF */
+	/* Register analog device */
 	raumfeld_audio_device = platform_device_alloc("soc-audio", 0);
 	if (!raumfeld_audio_device)
 		return -ENOMEM;

-	platform_set_drvdata(raumfeld_audio_device,
-			     &snd_soc_raumfeld);
-	ret = platform_device_add(raumfeld_audio_device);
-
-	/* no S/PDIF on Speakers */
 	if (machine_is_raumfeld_speaker())
+		platform_set_drvdata(raumfeld_audio_device,
+				     &snd_soc_raumfeld_speaker);
+
+	if (machine_is_raumfeld_connector())
+		platform_set_drvdata(raumfeld_audio_device,
+				     &snd_soc_raumfeld_connector);
+
+	ret = platform_device_add(raumfeld_audio_device);
+	if (ret < 0)
 		return ret;

 	raumfeld_enable_audio(true);
-
-	return ret;
+	return 0;
 }

 static void __exit raumfeld_audio_exit(void)
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 459566bfcd35..d155cbb58e1c 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -1,6 +1,6 @@
 config SND_SOC_SAMSUNG
 	tristate "ASoC support for Samsung"
-	depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_EXYNOS4
+	depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_EXYNOS4
 	select S3C64XX_DMA if ARCH_S3C64XX
 	select S3C2410_DMA if ARCH_S3C2410
 	help
@@ -55,7 +55,7 @@ config SND_SOC_SAMSUNG_JIVE_WM8750

 config SND_SOC_SAMSUNG_SMDK_WM8580
 	tristate "SoC I2S Audio support for WM8580 on SMDK"
-	depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDK6440 || MACH_SMDK6450 || MACH_SMDK6442 || MACH_SMDKV210 || MACH_SMDKC110)
+	depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDK6440 || MACH_SMDK6450 || MACH_SMDKV210 || MACH_SMDKC110)
 	select SND_SOC_WM8580
 	select SND_SAMSUNG_I2S
 	help
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
index 8aacf23d6f3a..3d26f6607aa4 100644
--- a/sound/soc/samsung/smdk_wm8580.c
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -249,7 +249,7 @@ static int __init smdk_audio_init(void)
 	int ret;
 	char *str;

-	if (machine_is_smdkc100() || machine_is_smdk6442()
+	if (machine_is_smdkc100()
 			|| machine_is_smdkv210() || machine_is_smdkc110()) {
 		smdk.num_links = 3;
 		/* Secondary is at offset SAMSUNG_I2S_SECOFF from Primary */
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index bb7cd5812945..d75043ed7fc0 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1306,10 +1306,6 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
 		/* no, then find CPU DAI from registered DAIs*/
 		list_for_each_entry(cpu_dai, &dai_list, list) {
 			if (!strcmp(cpu_dai->name, dai_link->cpu_dai_name)) {
-
-				if (!try_module_get(cpu_dai->dev->driver->owner))
-					return -ENODEV;
-
 				rtd->cpu_dai = cpu_dai;
 				goto find_codec;
 			}
@@ -1622,11 +1618,15 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num)

 	/* probe the cpu_dai */
 	if (!cpu_dai->probed) {
+		if (!try_module_get(cpu_dai->dev->driver->owner))
+			return -ENODEV;
+
 		if (cpu_dai->driver->probe) {
 			ret = cpu_dai->driver->probe(cpu_dai);
 			if (ret < 0) {
 				printk(KERN_ERR "asoc: failed to probe CPU DAI %s\n",
 				       cpu_dai->name);
+				module_put(cpu_dai->dev->driver->owner);
 				return ret;
 			}
 		}
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 456617e63789..999bb08cdfb1 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1110,7 +1110,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
 	trace_snd_soc_dapm_start(card);

 	list_for_each_entry(d, &card->dapm_list, list)
-		if (d->n_widgets)
+		if (d->n_widgets || d->codec == NULL)
 			d->dev_power = 0;

 	/* Check which widgets we need to power and store them in
diff --git a/sound/usb/card.c b/sound/usb/card.c
index a90662af2d6b..220c6167dd86 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -48,6 +48,7 @@
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>

+#include <sound/control.h>
 #include <sound/core.h>
 #include <sound/info.h>
 #include <sound/pcm.h>
@@ -492,14 +493,6 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
 		}
 	}

-	chip->txfr_quirk = 0;
-	err = 1; /* continue */
-	if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
-		/* need some special handlings */
-		if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0)
-			goto __error;
-	}
-
 	/*
 	 * For devices with more than one control interface, we assume the
 	 * first contains the audio controls. We might need a more specific
@@ -508,6 +501,14 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
 	if (!chip->ctrl_intf)
 		chip->ctrl_intf = alts;

+	chip->txfr_quirk = 0;
+	err = 1; /* continue */
+	if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
+		/* need some special handlings */
+		if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0)
+			goto __error;
+	}
+
 	if (err > 0) {
 		/* create normal USB audio interfaces */
 		if (snd_usb_create_streams(chip, ifnum) < 0 ||
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index eab06edcc9b7..c22fa76e363a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -86,16 +86,6 @@ struct mixer_build {
 	const struct usbmix_selector_map *selector_map;
 };

-enum {
-	USB_MIXER_BOOLEAN,
-	USB_MIXER_INV_BOOLEAN,
-	USB_MIXER_S8,
-	USB_MIXER_U8,
-	USB_MIXER_S16,
-	USB_MIXER_U16,
-};
-
-
 /*E-mu 0202/0404/0204 eXtension Unit(XU) control*/
 enum {
 	USB_XU_CLOCK_RATE		= 0xe301,
@@ -535,20 +525,21 @@ static int check_matrix_bitmap(unsigned char *bmap, int ich, int och, int num_ou
  * if failed, give up and free the control instance.
  */

-static int add_control_to_empty(struct mixer_build *state, struct snd_kcontrol *kctl)
+int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
+			      struct snd_kcontrol *kctl)
 {
 	struct usb_mixer_elem_info *cval = kctl->private_data;
 	int err;

-	while (snd_ctl_find_id(state->chip->card, &kctl->id))
+	while (snd_ctl_find_id(mixer->chip->card, &kctl->id))
 		kctl->id.index++;
-	if ((err = snd_ctl_add(state->chip->card, kctl)) < 0) {
+	if ((err = snd_ctl_add(mixer->chip->card, kctl)) < 0) {
 		snd_printd(KERN_ERR "cannot add control (err = %d)\n", err);
 		return err;
 	}
 	cval->elem_id = &kctl->id;
-	cval->next_id_elem = state->mixer->id_elems[cval->id];
-	state->mixer->id_elems[cval->id] = cval;
+	cval->next_id_elem = mixer->id_elems[cval->id];
+	mixer->id_elems[cval->id] = cval;
 	return 0;
 }

@@ -984,6 +975,9 @@ static struct snd_kcontrol_new usb_feature_unit_ctl_ro = {
 	.put = NULL,
 };

+/* This symbol is exported in order to allow the mixer quirks to
+ * hook up to the standard feature unit control mechanism */
+struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl;

 /*
  * build a feature control
@@ -1176,7 +1170,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,

 	snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n",
 		    cval->id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res);
-	add_control_to_empty(state, kctl);
+	snd_usb_mixer_add_control(state->mixer, kctl);
 }


@@ -1340,7 +1334,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state,

 	snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n",
 		    cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
-	add_control_to_empty(state, kctl);
+	snd_usb_mixer_add_control(state->mixer, kctl);
 }


@@ -1641,7 +1635,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw

 		snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n",
 			    cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
-		if ((err = add_control_to_empty(state, kctl)) < 0)
+		if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
 			return err;
 	}
 	return 0;
@@ -1858,7 +1852,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void

 	snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n",
 		    cval->id, kctl->id.name, desc->bNrInPins);
-	if ((err = add_control_to_empty(state, kctl)) < 0)
+	if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
 		return err;

 	return 0;
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index b4a2c8165e4b..ae1a14dcfe82 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -24,7 +24,16 @@ struct usb_mixer_interface {
 	u8 xonar_u1_status;
 };

-#define MAX_CHANNELS	10	/* max logical channels */
+#define MAX_CHANNELS	16	/* max logical channels */
+
+enum {
+	USB_MIXER_BOOLEAN,
+	USB_MIXER_INV_BOOLEAN,
+	USB_MIXER_S8,
+	USB_MIXER_U8,
+	USB_MIXER_S16,
+	USB_MIXER_U16,
+};

 struct usb_mixer_elem_info {
 	struct usb_mixer_interface *mixer;
@@ -55,4 +64,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
 void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer);
 int snd_usb_mixer_activate(struct usb_mixer_interface *mixer);

+int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
+			      struct snd_kcontrol *kctl);
+
 #endif /* __USBMIXER_H */
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 9146cffa6ede..3d0f4873112b 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -40,6 +40,8 @@
 #include "mixer_quirks.h"
 #include "helper.h"

+extern struct snd_kcontrol_new *snd_usb_feature_unit_ctl;
+
 /*
  * Sound Blaster remote control configuration
  *
@@ -492,6 +494,69 @@ static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer,
 	return err;
 }

+/* M-Audio FastTrack Ultra quirks */
+
+/* private_free callback */
+static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
+{
+	kfree(kctl->private_data);
+	kctl->private_data = NULL;
+}
+
+static int snd_maudio_ftu_create_ctl(struct usb_mixer_interface *mixer,
+				     int in, int out, const char *name)
+{
+	struct usb_mixer_elem_info *cval;
+	struct snd_kcontrol *kctl;
+
+	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
+	if (!cval)
+		return -ENOMEM;
+
+	cval->id = 5;
+	cval->mixer = mixer;
+	cval->val_type = USB_MIXER_S16;
+	cval->channels = 1;
+	cval->control = out + 1;
+	cval->cmask = 1 << in;
+
+	kctl = snd_ctl_new1(snd_usb_feature_unit_ctl, cval);
+	if (!kctl) {
+		kfree(cval);
+		return -ENOMEM;
+	}
+
+	snprintf(kctl->id.name, sizeof(kctl->id.name), name);
+	kctl->private_free = usb_mixer_elem_free;
+	return snd_usb_mixer_add_control(mixer, kctl);
+}
+
+static int snd_maudio_ftu_create_mixer(struct usb_mixer_interface *mixer)
+{
+	char name[64];
+	int in, out, err;
+
+	for (out = 0; out < 8; out++) {
+		for (in = 0; in < 8; in++) {
+			snprintf(name, sizeof(name),
+				 "AIn%d - Out%d Capture Volume", in + 1, out + 1);
+			err = snd_maudio_ftu_create_ctl(mixer, in, out, name);
+			if (err < 0)
+				return err;
+		}
+
+		for (in = 8; in < 16; in++) {
+			snprintf(name, sizeof(name),
+				 "DIn%d - Out%d Playback Volume", in - 7, out + 1);
+			err = snd_maudio_ftu_create_ctl(mixer, in, out, name);
+			if (err < 0)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
 void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
 			       unsigned char samplerate_id)
 {
@@ -533,6 +598,11 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
 					  snd_audigy2nx_proc_read);
 		break;

+	case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */
+	case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
+		err = snd_maudio_ftu_create_mixer(mixer);
+		break;
+
 	case USB_ID(0x0b05, 0x1739):
 	case USB_ID(0x0b05, 0x1743):
 		err = snd_xonar_u1_controls_create(mixer);
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 78792a8900c3..0b2ae8e1c02d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1988,7 +1988,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
 		.data = & (const struct snd_usb_audio_quirk[]) {
 			{
 				.ifnum = 0,
-				.type = QUIRK_IGNORE_INTERFACE
+				.type = QUIRK_AUDIO_STANDARD_MIXER,
 			},
 			{
 				.ifnum = 1,
@@ -2055,7 +2055,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
 		.data = & (const struct snd_usb_audio_quirk[]) {
 			{
 				.ifnum = 0,
-				.type = QUIRK_IGNORE_INTERFACE
+				.type = QUIRK_AUDIO_STANDARD_MIXER,
 			},
 			{
 				.ifnum = 1,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index bd13d7257240..2e969cbb393b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -19,6 +19,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>

+#include <sound/control.h>
 #include <sound/core.h>
 #include <sound/info.h>
 #include <sound/pcm.h>
@@ -263,6 +264,20 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
 }

 /*
+ * Create a standard mixer for the specified interface.
+ */
+static int create_standard_mixer_quirk(struct snd_usb_audio *chip,
+				       struct usb_interface *iface,
+				       struct usb_driver *driver,
+				       const struct snd_usb_audio_quirk *quirk)
+{
+	if (quirk->ifnum < 0)
+		return 0;
+
+	return snd_usb_create_mixer(chip, quirk->ifnum, 0);
+}
+
+/*
  * audio-interface quirks
  *
  * returns zero if no standard audio/MIDI parsing is needed.
@@ -294,7 +309,8 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
 		[QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
 		[QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
 		[QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
-		[QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk
+		[QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk,
+		[QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk,
 	};

 	if (quirk->type < QUIRK_TYPE_COUNT) {
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 32f2a97f2f14..1e79986b5777 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -84,6 +84,7 @@ enum quirk_type {
 	QUIRK_AUDIO_FIXED_ENDPOINT,
 	QUIRK_AUDIO_EDIROL_UAXX,
 	QUIRK_AUDIO_ALIGN_TRANSFER,
+	QUIRK_AUDIO_STANDARD_MIXER,

 	QUIRK_TYPE_COUNT
 };